diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index ea1e75657663d2452b0a4d83499274ce5d8bc0fa..a2d25f6361bcf84c27f090a318f769c9e29898ae 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -35,8 +35,8 @@ build:tests:
     - python setup.py sdist --dist-dir=$CI_PROJECT_DIR/build
   artifacts:
     paths:
-    - build/clmctest-2.0.0.tar.gz
-    - build/clmcservice-2.0.0.tar.gz
+    - build/clmctest-2.0.2.tar.gz
+    - build/clmcservice-2.0.2.tar.gz
     expire_in: 1 day
 
 test:all:
@@ -50,10 +50,10 @@ test:all:
     - echo "REPO_PASS=${REPO_PASS}" >> $CI_PROJECT_DIR/reporc
     - sudo scripts/test/fixture.sh create -f src/test/clmctest/rspec.json -r $CI_PROJECT_DIR -c all
     - sudo mkdir /var/lib/lxd/containers/test-runner/rootfs/opt/clmc/build
-    - sudo cp build/clmctest-2.0.0.tar.gz /var/lib/lxd/containers/test-runner/rootfs/opt/clmc/build
-    - sudo cp build/clmcservice-2.0.0.tar.gz /var/lib/lxd/containers/test-runner/rootfs/opt/clmc/build
-    - sudo lxc exec test-runner -- pip3 install /opt/clmc/build/clmctest-2.0.0.tar.gz
-    - sudo lxc exec test-runner -- pip3 install /opt/clmc/build/clmcservice-2.0.0.tar.gz
+    - sudo cp build/clmctest-2.0.2.tar.gz /var/lib/lxd/containers/test-runner/rootfs/opt/clmc/build
+    - sudo cp build/clmcservice-2.0.2.tar.gz /var/lib/lxd/containers/test-runner/rootfs/opt/clmc/build
+    - sudo lxc exec test-runner -- pip3 install /opt/clmc/build/clmctest-2.0.2.tar.gz
+    - sudo lxc exec test-runner -- pip3 install /opt/clmc/build/clmcservice-2.0.2.tar.gz
     - sudo lxc exec test-runner -- pytest -s --tb=short -rfp --pyargs clmctest
   when: on_success      
   
diff --git a/docs/clmc-service.md b/docs/clmc-service.md
index 4a87b60ebb670abfe0225a3c91fafb917919d3f2..bc5d6723d7c32077334b07865661e8f7f80357cb 100644
--- a/docs/clmc-service.md
+++ b/docs/clmc-service.md
@@ -228,7 +228,7 @@ with **/clmc-service** so that the nginx reverse proxy server (listening on port
         The response of this request is a JSON content, which contains a generated UUID and the database name. 
         This request UUID can then be used to manage the pipeline script (e.g. stopping it).
 
-        Returns a 400 Bad Request error if the request body is invalid.
+        Returns a 400 Bad Request error if the request body is invalid or the graph network topology has not been built yet.
      
     * Response Body Example:
     
diff --git a/src/service/VERSION b/src/service/VERSION
index 5a6bc65ed94515a041237159e5d2c89402f12a58..3391f8417e5d96317d5c25ca088eab68c25b5f6f 100644
--- a/src/service/VERSION
+++ b/src/service/VERSION
@@ -1 +1 @@
-__version__ = "2.0.0"
\ No newline at end of file
+__version__ = "2.0.2"
\ No newline at end of file
diff --git a/src/service/clmcservice/graphapi/conftest.py b/src/service/clmcservice/graphapi/conftest.py
index f4017e807f8211183377524b2ce795590ccdbf5c..8adc2834e4866ef818ceeddbda4c64eadb81214b 100644
--- a/src/service/clmcservice/graphapi/conftest.py
+++ b/src/service/clmcservice/graphapi/conftest.py
@@ -25,7 +25,7 @@
 import pytest
 from influxdb import InfluxDBClient
 from py2neo import Graph
-from clmcservice.graphapi.utilities import build_network_graph
+from clmcservice.graphapi.utilities import build_network_graph, delete_network_graph
 
 
 # static network configuration data used for testing cases, latencies reported in milliseconds
@@ -62,6 +62,14 @@ links = [
     }
 ]
 
+sdn_switches = [
+    {"switchDPID": "dpid1", "inetAddress": "/127.0.0.1:1234"},
+    {"switchDPID": "dpid2", "inetAddress": "/127.0.0.2:1234"},
+    {"switchDPID": "dpid3", "inetAddress": "/127.0.0.3:1234"},
+    {"switchDPID": "dpid4", "inetAddress": "/127.0.0.4:1234"},
+    {"switchDPID": "dpid5", "inetAddress": "/127.0.0.5:1234"},
+    {"switchDPID": "dpid6", "inetAddress": "/127.0.0.6:1234"},
+]
 
 switches = {
     "dpid1": "127.0.0.1",
@@ -97,8 +105,6 @@ def db_testing_data():
     :return: pair of time stamps - the from-to range of the generated influx test data, test database name and the graph db client object
     """
 
-    global links, switches, clusters
-
     test_sfc_name = "test_sfc"
     test_sfc_instance_1_name = "test_sfc_premium"
     test_sfc_instance_2_name = "test_sfc_non_premium"
@@ -111,8 +117,6 @@ def db_testing_data():
 
     # create the physical infrastructure subgraph
     dbs = influx.get_list_database()
-    switch_count, cluster_count, ues_count = build_network_graph(graph, switches, links, clusters, ues)
-    assert switch_count == 6 and cluster_count == 6 and ues_count == 3, "Network graph build failure"
 
     # check if exists ( if so, clear ) or create the test DB in influx
     if test_db_name in dbs:
@@ -188,3 +192,19 @@ def db_testing_data():
     influx.drop_database("CLMCMetrics")
     influx.drop_database(test_db_name)
     graph.delete_all()
+
+
+@pytest.fixture
+def graph_network_topology():
+    """
+    A utility fixture to build the network topology used throughout the test cases.
+    """
+
+    global links, switches, clusters
+
+    graph = Graph(host="localhost", password="admin")
+    build_network_graph(graph, switches, links, clusters, ues)
+
+    yield
+
+    delete_network_graph(graph)
diff --git a/src/service/clmcservice/graphapi/tests.py b/src/service/clmcservice/graphapi/tests.py
index 0b07a87ff40dc7c738f0008cf135560e5327df1f..7830b4338cd51ebb17dcd97f1d63eb2efffa100a 100644
--- a/src/service/clmcservice/graphapi/tests.py
+++ b/src/service/clmcservice/graphapi/tests.py
@@ -30,6 +30,7 @@ from pyramid import testing
 from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound, HTTPInternalServerError
 from clmcservice.graphapi.views import GraphAPI
 from clmcservice.models import MonitoringProcess
+from clmcservice.graphapi.conftest import links, sdn_switches, ues, clusters
 
 
 class TestGraphAPI(object):
@@ -45,7 +46,10 @@ class TestGraphAPI(object):
 
         self.registry = testing.setUp()
         self.registry.add_settings({"neo4j_host": "localhost", "neo4j_password": "admin", "influx_host": "localhost", "influx_port": 8086, "network_bandwidth": 104857600,
-                                    "network_ues_path": "/opt/clmc/src/service/resources/GraphAPI/network_ues.json"})
+                                    "network_ues_path": "/opt/clmc/src/service/resources/GraphAPI/network_ues.json",
+                                    "network_clusters_path": "/opt/clmc/src/service/resources/GraphAPI/network_clusters.json",
+                                    "sdn_controller_ip": "127.0.0.1", "sdn_controller_port": 8080
+                                    })
 
         yield
 
@@ -103,12 +107,13 @@ class TestGraphAPI(object):
         assert error_raised, error_msg
 
     @patch('clmcservice.graphapi.views.uuid4')
-    def test_build(self, uuid_mock, db_testing_data):
+    def test_build(self, uuid_mock, db_testing_data, graph_network_topology):
         """
         Tests the graph build API endpoint - it makes 2 API calls and checks that the expected graph was created (the influx data that's being used is reported to InfluxDB in the conftest file)
 
         :param uuid_mock: mock object to mock the behaviour of the uuid4 function
         :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data and the graph db client object (this is a fixture from conftest)
+        :param graph_network_topology: fixture to generate the network topology and clean it afterwards
         """
 
         from_timestamp, to_timestamp, graph_db = db_testing_data
@@ -238,12 +243,13 @@ class TestGraphAPI(object):
         assert response == {"deleted": 17}
 
     @patch('clmcservice.graphapi.views.uuid4')
-    def test_delete_temporal(self, uuid_mock, db_testing_data):
+    def test_delete_temporal(self, uuid_mock, db_testing_data, graph_network_topology):
         """
         Tests the delete API endpoint of the Graph API - the test depends on the build test to have been passed successfully so that graph_1_id and graph_2_id have been set
 
         :param uuid_mock: the mock object used to mimic the behaviour of the uuid.uuid4 function
         :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data and the graph db client object (this is a fixture from conftest)
+        :param graph_network_topology: fixture to generate the network topology and clean it afterwards
         """
 
         from_timestamp, to_timestamp, graph_db = db_testing_data
@@ -360,11 +366,12 @@ class TestGraphAPI(object):
             error_raised = True
         assert error_raised, error_msg
 
-    def test_rtt(self, db_testing_data):
+    def test_rtt(self, db_testing_data, graph_network_topology):
         """
         Tests the rtt API endpoint of the Graph API.
 
         :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data and the graph db client object (this is a fixture from conftest)
+        :param graph_network_topology: fixture to generate the network topology and clean it afterwards
         """
 
         from_timestamp, to_timestamp, graph_db = db_testing_data
@@ -461,16 +468,93 @@ class TestGraphAPI(object):
             assert response == {"forward_latencies": forward_latencies, "reverse_latencies": reverse_latencies, "total_forward_latency": sum(forward_latencies), "total_reverse_latency": sum(reverse_latencies),
                                 "bandwidth": 104857600, "response_time": response_time, "local_tags": {"traffic_source": startpoint}, "global_tags": global_tags}, "Incorrect RTT response"
 
-    def test_delete_network_graph(self):
+    @patch('clmcservice.graphapi.views.load')
+    @patch('clmcservice.graphapi.views.open')
+    @patch('clmcservice.graphapi.views.get')
+    def test_build_network(self, http_get_mock, file_open_mock, json_load_mock, db_testing_data):
+        """
+        Tests the functionality to build the network graph.
+
+        :param http_get_mock: mocks the HTTP GET function
+        :param file_open_mock: mocks the open file function
+        :param json_load_mock: mocks the JSON load function
+        :param db_testing_data: fixture used to get a reference to the graph DB
+        """
+
+        from_timestamp, to_timestamp, graph_db = db_testing_data  # fixture, used to get reference to the graph DB
+
+        # mock the responses from the sdn controller - 3 GET requests are executed, so we need 3 responses
+        mock_response1 = Mock()
+        mock_response1.status_code = 200
+        mock_response1.json.return_value = sdn_switches
+        mock_response2 = Mock()
+        mock_response2.status_code = 200
+        mock_response2.json.return_value = links
+        mock_response3 = Mock()
+        mock_response3.status_code = 200
+        mock_response3.json.return_value = links
+        # we are doing two calls to the API, hence need to repeat the responses
+        http_get_mock.side_effect = [mock_response1, mock_response2, mock_response3, mock_response1, mock_response2, mock_response3]
+
+        # mock the behaviour of reading the clusters and ues mapping from files
+        file_open_mock.return_value = MagicMock()  # use magic mock so that special methods (dunders) are auto generated
+        # we are doing two calls to the API, hence need to repeat the results
+        json_load_mock.side_effect = [clusters, ues, clusters, ues]
+
+        assert set([node["name"] for node in graph_db.nodes.match("Cluster")]) == set(), "Cluster nodes must not be created before the build request"
+        assert set([node["name"] for node in graph_db.nodes.match("Switch")]) == set(), "Switch nodes must not be created before the build request"
+        assert set([node["name"] for node in graph_db.nodes.match("UserEquipment")]) == set(), "UE nodes must not be created before the build request"
+
+        # sent request to build the network topology
+        request = testing.DummyRequest()
+        response = GraphAPI(request).build_network_topology()
+        assert response == {"new_switches_count": 6, "new_clusters_count": 6, "new_ues_count": 3}
+
+        assert set([node["name"] for node in graph_db.nodes.match("Cluster")]) == set(["DC" + str(i) for i in range(1, 7)]), "Cluster nodes must have been created"
+        assert set([node["name"] for node in graph_db.nodes.match("Switch")]) == set(["127.0.0." + str(i) for i in range(1, 7)]), "Switch nodes must have been created"
+        assert set([node["name"] for node in graph_db.nodes.match("UserEquipment")]) == set(["ue" + str(i) for i in (2, 3, 6)]), "UE nodes must have been created"
+
+        # send the same request and ensure that nothing else is created
+        request = testing.DummyRequest()
+        response = GraphAPI(request).build_network_topology()
+        assert response == {"new_switches_count": 0, "new_clusters_count": 0, "new_ues_count": 0}
+
+        assert set([node["name"] for node in graph_db.nodes.match("Cluster")]) == set(["DC" + str(i) for i in range(1, 7)])
+        assert set([node["name"] for node in graph_db.nodes.match("Switch")]) == set(["127.0.0." + str(i) for i in range(1, 7)])
+        assert set([node["name"] for node in graph_db.nodes.match("UserEquipment")]) == set(["ue" + str(i) for i in (2, 3, 6)])
+
+    def test_delete_network(self, graph_network_topology, db_testing_data):
         """
         Tests the delete network graph functionality.
+
+        :param graph_network_topology: fixture to generate the network topology and clean it afterwards
+        :param db_testing_data: fixture used to get a reference to the graph DB
         """
 
+        from_timestamp, to_timestamp, graph_db = db_testing_data  # fixture, used to get reference to the graph DB
+
+        assert set([node["name"] for node in graph_db.nodes.match("Cluster")]) == set(["DC" + str(i) for i in range(1, 7)]), "Cluster nodes must have been created before the delete request"
+        assert set([node["name"] for node in graph_db.nodes.match("Switch")]) == set(["127.0.0." + str(i) for i in range(1, 7)]), "Switch nodes must have been created before the delete request"
+        assert set([node["name"] for node in graph_db.nodes.match("UserEquipment")]) == set(["ue" + str(i) for i in (2, 3, 6)]), "UE nodes must have been created before the delete request"
+
+        # send the first delete request
         request = testing.DummyRequest()
         response = GraphAPI(request).delete_network_topology()
-
         assert response == {"deleted_switches_count": 6, "deleted_clusters_count": 6, "deleted_ues_count": 3}
 
+        assert set([node["name"] for node in graph_db.nodes.match("Cluster")]) == set(), "Cluster nodes must have been deleted"
+        assert set([node["name"] for node in graph_db.nodes.match("Switch")]) == set(), "Switch nodes must have been deleted"
+        assert set([node["name"] for node in graph_db.nodes.match("UserEquipment")]) == set(), "UE nodes must have been deleted"
+
+        # send the same request and ensure that nothing else is deleted
+        request = testing.DummyRequest()
+        response = GraphAPI(request).delete_network_topology()
+        assert response == {"deleted_switches_count": 0, "deleted_clusters_count": 0, "deleted_ues_count": 0}
+
+        assert set([node["name"] for node in graph_db.nodes.match("Cluster")]) == set()
+        assert set([node["name"] for node in graph_db.nodes.match("Switch")]) == set()
+        assert set([node["name"] for node in graph_db.nodes.match("UserEquipment")]) == set()
+
     @pytest.mark.parametrize("body, expected_error, error_msg", [
         (None, "Configuration must be a JSON object.", "A bad request error must have been raised in case of missing request body."),
         ('{"query_period": 45, "results_measurement_name": "graph"}', "Invalid JSON query document.", "A bad request error must have been raised in case of invalid request body."),
@@ -518,18 +602,16 @@ class TestGraphAPI(object):
             error_raised = True
         assert error_raised, error_msg
 
-    @patch('clmcservice.graphapi.views.load')
-    @patch('clmcservice.graphapi.views.open')
+    @patch('clmcservice.graphapi.views.Graph')
     @patch('clmcservice.graphapi.views.Popen')
     @patch('clmcservice.graphapi.views.uuid4')
-    def test_execute_graph_pipeline(self, uuid_mock, popen_mock, fileopen_mock, jsonload_mock):
+    def test_execute_graph_pipeline(self, uuid_mock, popen_mock, graph_mock):
         """
         Tests the functionality to start a pipeline script executing the graph API workflow - build, query, delete.
 
         :param uuid_mock: mock object for the uuid generator function
         :param popen_mock: mock object for the process creation function
-        :param fileopen_mock: mock object the mimic the behaviour of opening a file
-        :param jsonload_mock: mock object to mimic the behaviour of the JSON load function
+        :param graph_mock: mock object for the graph DB client
         """
 
         # mock the behaviour of the uuid4 function
@@ -543,12 +625,11 @@ class TestGraphAPI(object):
         type(popen_intance_mock).returncode = returncode_property_mock  # a property mock cannot be attached directly to the mock object, hence use its type object
         popen_mock.return_value = popen_intance_mock
 
-        # mock the behaviur of the open() and load() function
-        fileopen_mock.return_value = MagicMock()  # a magic mock is needed so that the dunder methods __enter__ and __exit__ are generated
-        ues_dict = {"127.0.0.1": "ue1", "127.0.0.2": "ue2", "127.0.0.3": "ue3"}
-        jsonload_mock.return_value = ues_dict
+        # mock the behaviour of the graph nodes match function
+        nodes_matcher_mock = Mock(return_value=[{"name": "ue1"}, {"name": "ue2"}, {"name": "ue3"}])  # the API method expects node objects with 'name' key (node['name'])
+        graph_mock.return_value.nodes.match = nodes_matcher_mock
 
-        # check proper behaviour
+        # check proper behaviour of the API endpoint
         service_functions = dict(nginx={"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)",
                                         "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"},
                                  minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
@@ -562,14 +643,19 @@ class TestGraphAPI(object):
         response = GraphAPI(request).execute_graph_pipeline()
         assert response == {"uuid": uuid_mock.return_value, "database": "test_sfc"}
 
-        monitor_json_body["ues"] = list(ues_dict.values())
+        # the API endpoint should have done this - appending the list of UEs to the JSON config of the pipeline script
+        monitor_json_body["ues"] = [node["name"] for node in nodes_matcher_mock.return_value]
 
         # assert that the graph pipeline script is ran with the JSON config that was received in the request along with the UEs
-        actual_call_arguments = popen_mock.call_args[0][0]  # we expect exactly one call to Popen() with one argument which is a list
-        assert actual_call_arguments[0] == "graph-pipeline.sh", "Incorrect graph pipeline script name"
-        assert loads(actual_call_arguments[1]) == monitor_json_body, "Incorrect JSON configuration passed to pipeline script"
+        # we expect one call to the Popen() mock (first 0 index), we take the proper arguments, not kwargs (second 0 index)
+        actual_call_arguments = popen_mock.call_args_list[0][0]
+        # expecting one argument which is a list
+        assert len(actual_call_arguments) == 1 and type(actual_call_arguments[0]) == list, "Incorrect call to Popen"
+        assert actual_call_arguments[0][0] == "graph-pipeline.sh", "Incorrect graph pipeline script name"
+        assert loads(actual_call_arguments[0][1]) == monitor_json_body, "Incorrect JSON configuration passed to pipeline script"
         pid_property_mock.assert_called_once_with()  # assert that the process ID attribute was called and saved
         returncode_property_mock.assert_called_once_with()  # assert that the process return code attribute was called to check if the process has started successfully
+        nodes_matcher_mock.assert_called_once_with("UserEquipment")  # assert that the graph nodes match function has been called with "UserEquipment" as argument
 
         # check that the process ID was saved
         assert MonitoringProcess.exists(uuid_mock.return_value), "Request identifier was not saved during the request processing"
@@ -586,12 +672,29 @@ class TestGraphAPI(object):
             error_raised = True
         assert error_raised, "Expecting a 500 HTTP error if the process terminated immediately after it was started"
 
-        # assert that the graph pipeline script is ran with the JSON config that was received in the request along with the UEs
-        actual_call_arguments = popen_mock.call_args[0][0]  # we expect exactly one call to Popen() with one argument which is a list
-        assert actual_call_arguments[0] == "graph-pipeline.sh", "Incorrect graph pipeline script name"
-        assert loads(actual_call_arguments[1]) == monitor_json_body, "Incorrect JSON configuration passed to pipeline script"
+        # assert that the graph pipeline script was still started with the JSON config that was received in the request along with the UEs
+        # we expect a second call to the Popen() mock (index 1), we take the proper arguments, not kwargs (first 0 index)
+        actual_call_arguments = popen_mock.call_args_list[1][0]
+        # expecting one argument which is a list
+        assert len(actual_call_arguments) == 1 and type(actual_call_arguments[0]) == list, "Incorrect call to Popen"
+        assert actual_call_arguments[0][0] == "graph-pipeline.sh", "Incorrect graph pipeline script name"
+        assert loads(actual_call_arguments[0][1]) == monitor_json_body, "Incorrect JSON configuration passed to pipeline script"
         pid_property_mock.assert_called_with()  # assert that the process ID attribute was called and saved
         returncode_property_mock.assert_called_with()  # assert that the process return code attribute was called to check if the process has started successfully
+        nodes_matcher_mock.assert_called_with("UserEquipment")  # assert that the graph nodes match function has been called with "UserEquipment" as argument
+
+        # check erroneous behaviour - no UE nodes are found, expecting bad request
+        nodes_matcher_mock.return_value = []
+        request = testing.DummyRequest()
+        request.body = body.encode(request.charset)
+        error_raised = False
+        try:
+            GraphAPI(request).execute_graph_pipeline()
+        except HTTPBadRequest:
+            error_raised = True
+        assert error_raised, "Expecting a 400 HTTP Bad Request error if no UE nodes were found (network topology not built)."
+        assert len(popen_mock.call_args_list) == 2, "No subprocess should be started if the UE nodes list is empty (network topology not built)"
+        nodes_matcher_mock.assert_called_with("UserEquipment")  # assert that the graph nodes match function has been called with "UserEquipment" as argument
 
     @patch('clmcservice.graphapi.views.kill')
     def test_stop_graph_pipeline(self, mock_kill):
diff --git a/src/service/clmcservice/graphapi/utilities.py b/src/service/clmcservice/graphapi/utilities.py
index 10b0a311c89e761d0abaa03059d5af9a98ded710..b8e736dd6e5eec3b08fbe2baceb7048aa16b27ab 100644
--- a/src/service/clmcservice/graphapi/utilities.py
+++ b/src/service/clmcservice/graphapi/utilities.py
@@ -165,7 +165,7 @@ def find_or_create_node(graph, node_type, return_created=False, **properties):
     """
     This function checks if a node of the given type with the given properties exists, and if not - creates it.
 
-    :param graph: the graph object
+    :param graph: the graph DB object
     :param node_type: the type of the node to find or create
     :param return_created: if True the result will contain both the node and a boolean flag if the node was created now
     :param properties: the properties of the node to find or create
@@ -194,7 +194,7 @@ def find_or_create_edge(graph, edge_type, from_node, to_node, **properties):
     """
     This function checks if an edge of the given type with the given properties exists, and if not - creates it.
 
-    :param graph: the graph object
+    :param graph: the graph DB object
     :param edge_type: the type of the edge to find or create
     :param from_node: the source of the edge
     :param to_node: the target of the edge
@@ -211,6 +211,28 @@ def find_or_create_edge(graph, edge_type, from_node, to_node, **properties):
     return edge
 
 
+def delete_nodes_with_type(graph, node_type):
+    """
+    This function deletes all nodes of a given type from the graph.
+
+    :param graph: the graph DB object
+    :param node_type: the type of the nodes to delete, e.g. Switch
+
+    :return: the number of deleted nodes
+    """
+
+    log.info("Deleting {0} nodes.".format(node_type))
+
+    subgraph = graph.nodes.match(node_type)
+    deleted_nodes = len(subgraph)
+    for node in subgraph:
+        graph.delete(node)
+
+    log.info("Deleted {0} {1} nodes.".format(deleted_nodes, node_type))
+
+    return deleted_nodes
+
+
 def build_temporal_subgraph(request_id, from_timestamp, to_timestamp, json_queries, graph, influx_client):
     """
     A function used to generate a temporal graph in the neo4j db.
@@ -232,7 +254,7 @@ def build_temporal_subgraph(request_id, from_timestamp, to_timestamp, json_queri
     db = sfc
     rp = "autogen"
 
-    log.info("Building graph for service function chain {0}/{1} from database {2} with retention policy {3}".format(sfc, sfci, db, rp))
+    log.info("Building graph for service function chain {0}/{1} from database {2} with retention policy {3} and request ID {4}".format(sfc, sfci, db, rp, request_id))
 
     # create a UUID reference node
     reference_node = Node("Reference", **{"uuid": request_id, "sfc": sfc, "sfci": sfci, "from": from_timestamp, "to": to_timestamp})
@@ -408,38 +430,15 @@ def create_node_from_mapping(graph, node, node_ip, mapping, new_node_type):
 
 def delete_network_graph(graph):
     """
-    A function used to delete all nodes of type Switch and Cluster in the neo4j graph.
+    A function used to delete all nodes of type Switch, Cluster and UserEquipment in the neo4j graph.
 
     :param graph: the neo4j graph
-    :return: the number of deleted switches and clusters
+    :return: the number of deleted switches, clusters and UEs
     """
 
-    log.info("Deleting Switch nodes.".format())
-
-    subgraph = graph.nodes.match("Switch")
-    deleted_switches = len(subgraph)
-    for node in subgraph:
-        graph.delete(node)
-
-    log.info("Deleted {0} Switch nodes.".format(deleted_switches))
-
-    log.info("Deleting Cluster nodes.")
-
-    subgraph = graph.nodes.match("Cluster")
-    deleted_clusters = len(subgraph)
-    for node in subgraph:
-        graph.delete(node)
-
-    log.info("Deleted {0} Cluster nodes.".format(deleted_clusters))
-
-    log.info("Deleting UserEquipment nodes.")
-
-    subgraph = graph.nodes.match("UserEquipment")
-    deleted_ues = len(subgraph)
-    for node in subgraph:
-        graph.delete(node)
-
-    log.info("Deleted {0} UserEquipment nodes.".format(deleted_clusters))
+    deleted_switches = delete_nodes_with_type(graph, "Switch")
+    deleted_clusters = delete_nodes_with_type(graph, "Cluster")
+    deleted_ues = delete_nodes_with_type(graph, "UserEquipment")
 
     return deleted_switches, deleted_clusters, deleted_ues
 
diff --git a/src/service/clmcservice/graphapi/views.py b/src/service/clmcservice/graphapi/views.py
index 582b731fee4cfbf215f0386a688cc470f34641d9..8aff5faf612a924f1e643159082692ae1f48fe26 100644
--- a/src/service/clmcservice/graphapi/views.py
+++ b/src/service/clmcservice/graphapi/views.py
@@ -263,78 +263,28 @@ class GraphAPI(object):
         sdn_controller_ip = self.request.registry.settings['sdn_controller_ip']
         sdn_controller_port = self.request.registry.settings['sdn_controller_port']
 
-        # retrieve all switches - if SDN controller is unavailable on the given IP address return 503 Service Unavailable
-        try:
-            url = "http://{0}:{1}{2}".format(sdn_controller_ip, sdn_controller_port, "/wm/core/controller/switches/json")
-            response = get(url)
-        except exceptions.ConnectionError:
-            msg = "The SDN controller is not available on IP {0} and port {1}.".format(sdn_controller_ip, sdn_controller_port)
-            log.error("Unexpected error: {0}".format(msg))
-            raise HTTPServiceUnavailable("The SDN controller couldn't be reached when trying to build the network topology.")
-
-        # check if the SDN controller returned the expected response
-        if response.status_code != 200:
-            msg = "The SDN controller returned a response with status code different than 200."
-            log.error("Unexpected error: {0}".format(msg))
-            raise HTTPNotImplemented("The SDN controller failed to return a successful response when querying for the list of switches.")
-
-        try:
-            content = response.json()
-        except ValueError:  # response not in JSON
-            msg = "The SDN controller returned a response which couldn't be converted to JSON."
-            log.error("Unexpected error: {0}".format(msg))
-            raise HTTPNotImplemented("The SDN controller failed to return a valid JSON response when querying for the list of switches.")
+        # retrieve all switches
+        url = "http://{0}:{1}{2}".format(sdn_controller_ip, sdn_controller_port, "/wm/core/controller/switches/json")
+        log.info("Fetching the list of switches from the SDN controller - {0}".format(url))
+        switches_json_response = self.get_sdn_controller_response(url)
 
         # map the DPID of each switch to its IP address
         switches = {}
-        for switch in content:
+        for switch in switches_json_response:
             # map the dpid to the switch IP address, the IP address is in the format '/172.168.23.54:1234'
             switches[switch["switchDPID"]] = switch["inetAddress"][1:].split(":")[0]
 
-        # retrieve all external links (gathered through BDDP) - if SDN controller is unavailable on the given IP address return 503 Service Unavailable
-        try:
-            url = "http://{0}:{1}{2}".format(sdn_controller_ip, sdn_controller_port, "/wm/topology/external-links/json")
-            response = get(url)
-        except exceptions.ConnectionError:
-            msg = "The SDN controller is not available on IP {0} and port {1}.".format(sdn_controller_ip, sdn_controller_port)
-            log.error("Unexpected error: {0}".format(msg))
-            raise HTTPServiceUnavailable("The SDN controller couldn't be reached when trying to build the network topology.")
-
-        # check if the SDN controller returned the expected response
-        if response.status_code != 200:
-            msg = "The SDN controller returned a response with status code different than 200."
-            log.error("Unexpected error: {0}".format(msg))
-            raise HTTPNotImplemented("The SDN controller failed to return a successful response when querying for the network topology.")
-
-        try:
-            external_links = response.json()
-        except ValueError:  # response not in JSON
-            msg = "The SDN controller returned a response which couldn't be converted to JSON."
-            log.error("Unexpected error: {0}".format(msg))
-            raise HTTPNotImplemented("The SDN controller failed to return a valid JSON response when querying for the network topology.")
+        # retrieve all external links (gathered through BDDP)
+        url = "http://{0}:{1}{2}".format(sdn_controller_ip, sdn_controller_port, "/wm/topology/external-links/json")
+        log.info("Fetching all external links from the SDN controller - {0}".format(url))
+        external_links = self.get_sdn_controller_response(url)
 
-        # retrieve all local links (gathered through LLDP) - if SDN controller is unavailable on the given IP address return 503 Service Unavailable
-        try:
-            url = "http://{0}:{1}{2}".format(sdn_controller_ip, sdn_controller_port, "/wm/topology/links/json")
-            response = get(url)
-        except exceptions.ConnectionError:
-            msg = "The SDN controller is not available on IP {0} and port {1}.".format(sdn_controller_ip, sdn_controller_port)
-            log.error("Unexpected error: {0}".format(msg))
-            raise HTTPServiceUnavailable("The SDN controller couldn't be reached when trying to build the network topology.")
-
-        if response.status_code != 200:
-            msg = "The SDN controller returned a response with status code different than 200."
-            log.error("Unexpected error: {0}".format(msg))
-            raise HTTPNotImplemented("The SDN controller failed to return a successful response when querying for the network topology.")
-
-        try:
-            local_links = response.json()
-        except ValueError:  # response not in JSON
-            msg = "The SDN controller returned a response which couldn't be converted to JSON."
-            log.error("Unexpected error: {0}".format(msg))
-            raise HTTPNotImplemented("The SDN controller failed to return a valid JSON response when querying for the network topology.")
+        # retrieve all local links (gathered through LLDP)
+        url = "http://{0}:{1}{2}".format(sdn_controller_ip, sdn_controller_port, "/wm/topology/links/json")
+        log.info("Fetching all local links from the SDN controller - {0}".format(url))
+        local_links = self.get_sdn_controller_response(url)
 
-        # TODO this is a temporary solution - currently the service router to clusters mapping is read from a file (which must be manually prepared beforehand)
+        # TODO this is a temporary solution - currently the service router to cluster mapping is read from a file (which must be manually prepared beforehand)
         clusters_file = self.request.registry.settings["network_clusters_path"]
         try:
             with open(clusters_file) as fh:
@@ -344,7 +294,7 @@ class GraphAPI(object):
             log.error("No service-router-to-cluster mapping was found while building the network topology.")
             clusters = {}
 
-        # TODO this is a temporary solution - currently the service router to ues mapping is read from a file (which must be manually prepared beforehand)
+        # TODO this is a temporary solution - currently the service router to ue mapping is read from a file (which must be manually prepared beforehand)
         ues_file = self.request.registry.settings["network_ues_path"]
         try:
             with open(ues_file) as fh:
@@ -363,6 +313,43 @@ class GraphAPI(object):
 
         return {"new_switches_count": switch_count, "new_clusters_count": clusters_count, "new_ues_count": ues_count}
 
+    @staticmethod
+    def get_sdn_controller_response(url):
+        """
+        Send a GET request to the SDN controller and validate the response - expecting a JSON response (currently only applicable for Floodlight)
+
+        :param url: the full url - SDN controller IP and port + API endpoint path
+
+        :return: the response in JSON
+
+        :raises HTTPServiceUnavailable: if the SDN controller couldn't be reached on the given IP and port
+        :raises HTTPNotImplemented: if the SDN controller returned status code different than 200 or the response couldn't be converted to JSON
+        """
+
+        # try getting any response from the controller
+        try:
+            response = get(url)
+        except exceptions.ConnectionError:
+            msg = "The SDN controller is not available - request was sent to {0}.".format(url)
+            log.error("Unexpected error: {0}".format(msg))
+            raise HTTPServiceUnavailable("The SDN controller couldn't be reached when trying to build the network topology.")
+
+        # check if the SDN controller returned the expected response
+        if response.status_code != 200:
+            msg = "The SDN controller returned a response with status code different than 200 - request was sent to {0}.".format(url)
+            log.error("Unexpected error: {0}".format(msg))
+            raise HTTPNotImplemented("The SDN controller failed to return a successful response status code.")
+
+        # check if the SDN controller returned a JSON response
+        try:
+            response_json = response.json()
+        except ValueError:  # response not in JSON
+            msg = "The SDN controller returned a response which couldn't be converted to JSON - request was sent to {0}.".format(url)
+            log.error("Unexpected error: {0}".format(msg))
+            raise HTTPNotImplemented("The SDN controller failed to return a valid JSON response.")
+
+        return response_json
+
     @view_config(route_name='graph_network_topology', request_method='DELETE')
     def delete_network_topology(self):
         """
@@ -393,32 +380,31 @@ class GraphAPI(object):
 
         influx_client = InfluxDBClient(host=self.request.registry.settings['influx_host'], port=self.request.registry.settings['influx_port'], timeout=10)
 
-        database_name = json_queries["service_function_chain"]
+        sfc = json_queries["service_function_chain"]
+        database_name = json_queries["service_function_chain"]  # currently DB is named after the SFC
         if database_name not in [db["name"] for db in influx_client.get_list_database()]:
             raise HTTPBadRequest("Database for service function chain {0} not found.".format(database_name))
 
-        request_uuid = str(uuid4())
-        sfc = json_queries["service_function_chain"]
-
-        # get the list of ues
-        ues_file = self.request.registry.settings["network_ues_path"]
-        try:
-            with open(ues_file) as fh:
-                ues = load(fh)
-        except Exception as e:
-            log.error("Unexpected error: {0}".format(e))
-            log.error("No service-router-to-ue mapping was found while building the network topology.")
-            ues = {}
-
-        ues_list = list(ues.values())
+        # get the list of UEs from the Neo4j graph - if no UEs are found, the network topology has not been built yet, so return bad request
+        graph = Graph(host=self.request.registry.settings['neo4j_host'], password=self.request.registry.settings['neo4j_password'])
+        ue_nodes = graph.nodes.match("UserEquipment")
+        ues_list = [ue_node["name"] for ue_node in ue_nodes]
+        # return bad request for empty list of UEs
+        if len(ues_list) == 0:
+            raise HTTPBadRequest("Graph pipeline process for SFC {0} cannot be started - no UserEquipment nodes found, the graph network topology has not been built.".format(sfc))
+        # add the list of UEs to the JSON configuration passed to the pipeline script
         json_queries["ues"] = ues_list
 
+        # start a graph pipeline subprocess
         process = Popen(["graph-pipeline.sh", dumps(json_queries)])
         process_pid = process.pid
         process_return_code = process.returncode
 
+        # check if the process returned immediately for some reason (could happen if the graph-pipeline script is not found)
         if process_return_code is None:  # process has started running
-            log.info("Started a graph pipeline process for SFC {0} with PID {1}".format(sfc, process_pid))
+            request_uuid = str(uuid4())  # generate a request UUID used to map to the process ID of the graph pipeline
+
+            log.info("Started a graph pipeline process for SFC {0} with PID {1} and request UUID {2}".format(sfc, process_pid, request_uuid))
 
             MonitoringProcess.add({"request_id": request_uuid, "process_id": process_pid})
 
diff --git a/src/test/VERSION b/src/test/VERSION
index 5a6bc65ed94515a041237159e5d2c89402f12a58..3391f8417e5d96317d5c25ca088eab68c25b5f6f 100644
--- a/src/test/VERSION
+++ b/src/test/VERSION
@@ -1 +1 @@
-__version__ = "2.0.0"
\ No newline at end of file
+__version__ = "2.0.2"
\ No newline at end of file