diff --git a/docs/clmc-service.md b/docs/clmc-service.md
index 01e5baee9904d7a8c949eb4513e03ab9d596b1fc..4a87b60ebb670abfe0225a3c91fafb917919d3f2 100644
--- a/docs/clmc-service.md
+++ b/docs/clmc-service.md
@@ -187,20 +187,94 @@ with **/clmc-service** so that the nginx reverse proxy server (listening on port
     * For each service function, there is a field/fields from which an average estimation of the size of a **response** from this service function can be derived.
     * All the aforementioned fields reside in a single measurement.
 
-* **POST** ***/graph/temporal?from={timestamp-seconds}&to={timestamp-seconds}***
+* **POST** ***/graph/monitor***
 
-    This API method sends a request to the CLMC service to build a graph related to the time range between the *from* and *to* timestamps (URL query parameters).
+    This API method instructs the CLMC service to run a pipeline script which executes the workflow of building a temporal graph, querying it for round-trip time (and other measurements)
+    and then deleting the temporal graph. The pipeline script runs continuously as a background process on the CLMC host.
+    
+    * Request:
+    
+        Expects a JSON-formatted request body that declares the service function chain and service function chain instance for which the pipeline script will execute.
+        The request body must define the service functions that will be included in the graph along with the measurement name, response time field, request size field and
+        response size field for each service function. The declared fields can be InfluxDB functions applied across multiple fields. The query period (how often the pipeline
+        is executed, in seconds), the results measurement name (where results will be inserted) and a list of user equipment names (*ues*) used as start points for the
+        round-trip-time query must also be included.
+        
+    * Request Body Example:
+        ```json
+        {
+          "query_period": "30",
+          "results_measurement_name": "round_trip_time_measurement",
+          "service_function_chain": "MSDemo",
+          "service_function_chain_instance": "MSDemo_1",
+          "service_functions": {
+            "nginx": {
+              "response_time_field": "mean(response_time)",
+              "request_size_field": "mean(request_size)",
+              "response_size_field": "mean(response_size)",
+              "measurement_name": "nginx"
+            },
+            "minio": {
+              "response_time_field": "mean(sum)/mean(count)",
+              "request_size_field": "mean(request_size)/mean(count)",
+              "response_size_field": "mean(response_size)/mean(count)",
+              "measurement_name": "minio_http_requests_duration_seconds"
+            }
+          }
+        }
+        ```
+    
+    * Response:
+
+        The response to this request is a JSON document which contains a generated request UUID and the database name.
+        This UUID can then be used to manage the graph monitoring pipeline (e.g. to stop it via the DELETE endpoint below).
+
+        Returns a 400 Bad Request error if the request body is invalid.
+     
+    * Response Body Example:
+    
+        ```json
+        {
+          "uuid": "75df6f8d-3829-4fd8-a3e6-b3e917010141",
+          "database": "MSDemo"
+        }
+        ```
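+
+    * Usage Example:
+
+        A minimal sketch of starting the pipeline with curl, assuming the CLMC service is reachable on *localhost* behind the nginx reverse proxy (under the */clmc-service* prefix)
+        and the request body example above is saved in a hypothetical file called *monitor-request.json*:
+
+        ```
+        curl -s -X POST http://localhost/clmc-service/graph/monitor -d @monitor-request.json
+        ```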
+
+* **DELETE** ***/graph/monitor/{request_id}*** 
+
+    This API method instructs the CLMC service to stop a running graph monitoring pipeline script associated with the request identifier given in the URL
+    (retrieved from the response of a POST request to */graph/monitor*), e.g. a request sent to */graph/monitor/75df6f8d-3829-4fd8-a3e6-b3e917010141*.
+
+    * Response:
+
+        The response to this request is a JSON document which contains a single output message describing the state of the monitoring process before it was terminated.
+
+        Returns a 404 Not Found error if the request ID is not associated with any graph monitoring process.
+
+    * Response Body Example:
+
+        ```json
+        {
+           "msg": "Monitoring process has been successfully stopped."
+        }
+        ```
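+
+    * Usage Example:
+
+        A sketch of stopping the pipeline with curl, under the same assumptions as above and using the example request UUID:
+
+        ```
+        curl -s -X DELETE http://localhost/clmc-service/graph/monitor/75df6f8d-3829-4fd8-a3e6-b3e917010141
+        ```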
+
+* **POST** ***/graph/temporal***
+
+    This API method sends a request to the CLMC service to build a temporal graph snapshot for the time range between the *from* and *to* timestamps given in the request body.
 
    * Request:
 
         Expects a JSON-formatted request body which declares the service function chain and service function chain instance for which the graph is built.
-        The request should also include the service functions that must be included in the graph along with the measurement name, response time field, request size field and
-        response size field for each service function. The declared fields could be influx functions across multiple fields.
+        The request body must define the service functions that will be included in the graph along with the measurement name, response time field, request size field and
+        response size field for each service function. The declared fields can be InfluxDB functions applied across multiple fields.
+        The time range is also declared in the body through the *from* and *to* parameters (UNIX timestamps in seconds).
 
    * Request Body Example:
 
         ```json
         {
+          "from": 1528385420,
+          "to": 1528385860,
           "service_function_chain": "MSDemo",
           "service_function_chain_instance": "MSDemo_1",
           "service_functions": {
@@ -223,13 +297,13 @@ with **/clmc-service** so that the nginx reverse proxy server (listening on port
         These parameters are then filled in the following influx query template:
 
         ```
-        SELECT {0} AS mean_response_time, {1} AS mean_request_size, {2} AS mean_response_size FROM "{3}"."{4}".{5} WHERE "flame_sfc"=\'{6}\' and "flame_sfci"=\'{7}\' and time>={8} and time<{9} GROUP BY "flame_sfe", "flame_location", "flame_sf"
+        SELECT {0} AS mean_response_time, {1} AS mean_request_size, {2} AS mean_response_size FROM "{3}"."{4}".{5} WHERE "flame_sfc"=\'{6}\' and "flame_sfci"=\'{7}\' and "flame_sfp"=\'{8}\' and time>={9} and time<{10} GROUP BY "flame_sfe", "flame_location", "flame_sf"
         ```
 
-        E.g. for the minio service function, the following query will be used to retrieve the data from influx (request url is */graph/build?from=1528385420&to=1528385860*):
+        E.g. for the minio service function (i.e. flame_sfp=minio), the following query will be used to retrieve the data from InfluxDB:
 
         ```
-        SELECT mean(sum)/mean(count) AS mean_response_time, mean(request_size)/mean(count) AS mean_request_size, mean(response_size)/mean(count) AS mean_response_size FROM "MSDemo"."autogen".minio_http_requests_duration_seconds WHERE "flame_sfc"='MSDemo' and "flame_sfci"='MSDemo_1' and time>=1528385420000000000 and time<1528385860000000000 GROUP BY "flame_sfe", "flame_location", "flame_sf"
+        SELECT mean(sum)/mean(count) AS mean_response_time, mean(request_size)/mean(count) AS mean_request_size, mean(response_size)/mean(count) AS mean_response_size FROM "MSDemo"."autogen".minio_http_requests_duration_seconds WHERE "flame_sfc"='MSDemo' and "flame_sfci"='MSDemo_1' and "flame_sfp"='minio' and time>=1528385420000000000 and time<1528385860000000000 GROUP BY "flame_sfe", "flame_location", "flame_sf"
         ```
         
         N.B. database name is assumed to be the SFC identifier
@@ -237,40 +311,23 @@ with **/clmc-service** so that the nginx reverse proxy server (listening on port
 
    * Response:
 
-        The response of this request is a JSON content, which contains all request parameters used to build the graph, along with a request UUID. 
+        The response to this request is a JSON document which contains the temporal graph's UUID, the database name, the timestamps converted to nanoseconds and a list
+        of the names of all Endpoint nodes that were created for this temporal graph.
         This request ID can then be used to manage the temporal subgraph that was created in response to this request.
 
         Returns a 400 Bad Request error if the request body is invalid.
 
-        Returns a 400 Bad Request error if the request URL parameters are invalid or missing.
-
    * Response Body Example:
 
         ```json
         {
           "database": "MSDemo",
-          "retention_policy": "autogen",
-          "service_function_chain": "MSDemo",
-          "service_function_chain_instance": "MSDemo_1",
-          "service_functions": {
-            "nginx": {
-              "response_time_field": "mean(response_time)",
-              "request_size_field": "mean(request_size)",
-              "response_size_field": "mean(response_size)",
-              "measurement_name": "nginx"
-            },
-            "minio": {
-              "response_time_field": "mean(sum)/mean(count)",
-              "request_size_field": "mean(request_size)/mean(count)",
-              "response_size_field": "mean(response_size)/mean(count)",
-              "measurement_name": "minio_http_requests_duration_seconds"
-            }
-          },
           "graph": {
              "uuid": "75df6f8d-3829-4fd8-a3e6-b3e917010141",
+             "endpoints": ["minio_ep1", "minio_ep2", "nginx_ep1", "nginx_ep2"],
              "time_range": {
-               "from": 1528385420,
-               "to": 1528385860
+               "from": 1528385420000000000,
+               "to": 1528385860000000000
              }
           }
         }
@@ -283,7 +340,7 @@ with **/clmc-service** so that the nginx reverse proxy server (listening on port
 
     * Response:
 
-        The response of this request is a JSON content, which contains the request UUID and the number of deleted nodes.
+        The response to this request is a JSON document which contains the number of deleted nodes.
 
         Returns a 404 Not Found error if the request UUID is not associated with any nodes in the graph.
 
@@ -291,26 +348,46 @@ with **/clmc-service** so that the nginx reverse proxy server (listening on port
 
         ```json
         {
-           "uuid": "75df6f8d-3829-4fd8-a3e6-b3e917010141",
            "deleted": 5
         }
         ```
 
-* **GET** ***/graph/temporal/{graph_id}/round-trip-time?compute_node={compute_node_id}&endpoint={endpoint_id}***
+* **DELETE** ***/graph/static/{service_function_chain_identifier}***
+
+    This API method sends a request to delete the full media service graph identified by its service function chain identifier - this means deleting the SFC node, all SFC instance nodes
+    linked to the SFC node, and all SF package, SF and temporal SF endpoint nodes which are part of the graph for this SFC.
+    
+    * Response:
+
+        The response to this request is a JSON document which contains the number of deleted nodes.
+
+        Returns a 404 Not Found error if the SFC identifier is not associated with any SFC nodes in the graph.
+
+    * Response Body Example:
+
+        ```json
+        {
+           "deleted": 10
+        }
+        ```
+
+* **GET** ***/graph/temporal/{graph_id}/round-trip-time?startpoint={startpoint_id}&endpoint={endpoint_id}***
 
     This API method sends a request to run the Cypher Round-Trip-Time query over a temporal graph associated with a request UUID (retrieved from the response of a build-graph request).
-    The request UUID must be given in the request URL, e.g. request sent to */graph/temporal/75df6f8d-3829-4fd8-a3e6-b3e917010141/round-trip-time?compute_node=DC2&endpoint=minio_1_ep1*
+    The request UUID must be given in the request URL, e.g. a request sent to */graph/temporal/75df6f8d-3829-4fd8-a3e6-b3e917010141/round-trip-time?startpoint=DC2&endpoint=minio_1_ep1*
+    will return the RTT breakdown for the path starting at Cluster DC2 and ending at SF endpoint minio_1_ep1. The startpoint can be a Cluster, Switch or UserEquipment node.
 
     * Response:
 
-        The response of this request is a JSON content, which contains the result from the Cypher query including forward latencies, reverse latencies and service function response time along with the
-        calculated round trip time and global tag values for the given service function endpoint.
+        The response to this request is a JSON document which contains the result of the Cypher query, including forward latencies, reverse latencies and service function response time, along with the
+        calculated round-trip time and tag values for a potential measurement.
 
         Returns a 400 Bad Request error if the URL parameters are invalid
 
         Returns a 404 Not Found error if the request UUID and the endpoint ID are not associated with an endpoint node in the graph.
 
-        Returns a 404 Not Found error if the compute node ID is not associated with a compute node in the graph.
+        Returns a 404 Not Found error if the startpoint ID is not associated with a Cluster, Switch or UserEquipment node in the graph.
 
    * Response Body Example:
 
@@ -324,7 +401,7 @@ with **/clmc-service** so that the nginx reverse proxy server (listening on port
             ],
             "total_forward_latency": 33,
             "reverse_latencies": [
-               15, 18
+               11, 22
             ],
             "total_reverse_latency": 33,
             "response_time": 15.75,
@@ -337,14 +414,17 @@ with **/clmc-service** so that the nginx reverse proxy server (listening on port
                 "flame_sf": "minio_1",
                 "flame_location": "DC1",
                 "flame_server": "DC1"
+            },
+            "local_tags": {
+               "traffic_source": "DC2"
             }
         }
         ```
 
-        Here, the *forward_latencies* and *reverse_latencies* lists represent the latency experienced at each hop between compute nodes. For example, if the path was DC2-DC3-DC4 and the SF endpoint was hosted
-        on DC4, the response data shows that latency(DC2-DC3) = 22, latency(DC3-DC4) = 11, latency(DC4-DC3) = 15, latency(DC3-DC2) = 18, response_time(minio_1_ep1) = 15.75
+        Here, the *forward_latencies* and *reverse_latencies* lists represent the reported latency at each hop between switches/clusters/UEs. For example, if the path was DC2-DC3-DC4 and the SF endpoint was hosted
+        on DC4, the response data shows that latency(DC2-DC3) = 22, latency(DC3-DC4) = 11, latency(DC4-DC3) = 11, latency(DC3-DC2) = 22, response_time(minio_1_ep1) = 15.75
 
-        N.B. if the endpoint is hosted on the compute node identified in the URL parameter, then there will be no network hops between compute nodes, so the latency lists would be empty, example:
+        N.B. if the endpoint is hosted on the cluster identified in the URL parameter, then there will be no network hops between clusters/switches, so the latency lists would be empty, example:
 
         ```json
         {
@@ -365,14 +445,53 @@ with **/clmc-service** so that the nginx reverse proxy server (listening on port
                 "flame_sf": "minio_1",
                 "flame_location": "DC1",
                 "flame_server": "DC1"
+            },
+            "local_tags": {
+               "traffic_source": "DC2"
             }
         }
         ```
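+
+    * Usage Example:
+
+        A sketch of running the round-trip-time query with curl, under the same assumptions as above - this mirrors the call made by the graph monitoring pipeline script:
+
+        ```
+        curl -s -X GET "http://localhost/clmc-service/graph/temporal/75df6f8d-3829-4fd8-a3e6-b3e917010141/round-trip-time?startpoint=DC2&endpoint=minio_1_ep1"
+        ```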
 
-* Generating network measurements
+* **POST** ***/graph/network***
+
+    This API method instructs CLMC to build the network topology in its graph database by querying the SDN controller and retrieving the switch-to-switch latency measurements -
+    currently, Floodlight is the only supported SDN controller.
+
+    * Response:
+
+        The response to this request is a JSON document which shows how many Switch, Cluster and UserEquipment nodes were created in the graph.
+
+        Returns a 503 Service Unavailable error if the SDN controller cannot be reached.
+
+        Returns a 501 Not Implemented error if the SDN controller is reachable but does not respond with valid JSON content on the API endpoint for querying the network topology.
+
+    * Response Body Example:
+
+        ```json
+        {
+           "new_switches_count": 12,
+           "new_clusters_count": 5,
+           "new_ues_count": 3
+        }
+        ```
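+
+    * Controller Data Example:
+
+        For illustration only - the link data retrieved from the SDN controller is assumed to be a JSON list of switch-to-switch links with latency values, as exposed by
+        Floodlight's links API endpoint (e.g. */wm/topology/links/json*); the field names below follow the format used in the CLMC test fixtures:
+
+        ```json
+        [
+          {
+            "src-switch": "00:00:00:00:00:00:00:01",
+            "dst-switch": "00:00:00:00:00:00:00:02",
+            "latency": 7500
+          }
+        ]
+        ```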
+
+* **DELETE** ***/graph/network***
+
+    This API method instructs CLMC to delete the network topology from its graph database.
+
+    * Response:
+
+        The response to this request is a JSON document which shows how many Switch, Cluster and UserEquipment nodes were deleted from the graph.
 
-    To generate network measurements, which are then used to create the network topology in the Neo4j graph, refer to
-    the src/service/clmcservice/generate_network_measurements.py script. An example configuration file is src/service/resources/GraphAPI/network_config.json
+    * Response Body Example:
+
+        ```json
+        {
+           "deleted_switches_count": 12,
+           "deleted_clusters_count": 5,
+           "deleted_ues_count": 3
+        }
+        ```
 
 ## CRUD API for service function endpoint configurations
 
diff --git a/scripts/clmc-service/graph-pipeline.sh b/scripts/clmc-service/graph-pipeline.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2b8700cc6984400e1221ca0f995d136e69a79537
--- /dev/null
+++ b/scripts/clmc-service/graph-pipeline.sh
@@ -0,0 +1,100 @@
+#!/bin/bash
+#/////////////////////////////////////////////////////////////////////////
+#//
+#// (c) University of Southampton IT Innovation Centre, 2018
+#//
+#// Copyright in this software belongs to University of Southampton
+#// IT Innovation Centre of Gamma House, Enterprise Road,
+#// Chilworth Science Park, Southampton, SO16 7NS, UK.
+#//
+#// This software may not be used, sold, licensed, transferred, copied
+#// or reproduced in whole or in part in any manner or form or in or
+#// on any media by any person other than in accordance with the terms
+#// of the Licence Agreement supplied with the software, or otherwise
+#// without the prior written consent of the copyright owners.
+#//
+#// This software is distributed WITHOUT ANY WARRANTY, without even the
+#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+#// PURPOSE, except where stated in the Licence Agreement supplied with
+#// the software.
+#//
+#//      Created By :            Nikolay Sanchev
+#//      Created Date :          21/02/2019
+#//      Created for Project :   FLAME
+#//
+#/////////////////////////////////////////////////////////////////////////
+
+set -euo pipefail
+
+CLMC_IP="localhost"
+
+JSON_CONFIG=$1  # expects the JSON configuration passed to the execute_graph_pipeline API endpoint
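+
+# illustrative invocation, assuming the /graph/monitor request body has been saved in a hypothetical file called monitor-request.json:
+#   graph-pipeline.sh "$(cat monitor-request.json)"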
+
+# extract and delete some of the configuration details, which are used by this script
+fields=$(echo "${JSON_CONFIG}" | jq -r '"\(.query_period) \(.service_function_chain) \(.results_measurement_name)"')
+read query_period db_name results_measurement <<< "${fields}"
+
+# extract the list of ues
+ues=($(echo "${JSON_CONFIG}" | jq -r '.ues | .[]'))  # convert the jq array to a bash array
+
+# delete the fields which are not part of the /graph/temporal request body
+JSON_CONFIG=$(echo "${JSON_CONFIG}" | jq 'del(.query_period, .results_measurement_name, .ues)')
+
+
+while true
+do
+    echo "Building temporal graph..."
+
+    end=$(date +%s)
+    start=$((${end}-${query_period}))
+
+    echo "Start - ${start}, End - ${end}"
+    JSON_STRING=$(echo "${JSON_CONFIG}" | jq --argjson from ${start} --argjson to ${end} '. + {from: $from, to: $to}')
+    echo "Sending build request to CLMC"
+    echo "Request body - ${JSON_STRING}"
+    response=$(curl -s -X POST -d "${JSON_STRING}" http://${CLMC_IP}/clmc-service/graph/temporal)
+
+    fields=$(echo "${response}" | jq -r '"\(.graph.time_range.to) \(.graph.uuid)"')
+    read timestamp graph_uuid <<< "${fields}"
+
+    endpoints=($(echo "${response}" | jq -r '.graph.endpoints | .[]'))  # convert the jq array to a bash array
+
+    echo "Received request uuid ${graph_uuid}"
+    echo "Timestamp to use for measurement ${timestamp}"
+    echo "Received endpoints: ${endpoints[@]}"
+
+    for endpoint in "${endpoints[@]}"; do
+        for ue in "${ues[@]}"; do
+
+            echo "Querying for round-trip time..."
+            response=$(curl -s -X GET "http://${CLMC_IP}/clmc-service/graph/temporal/${graph_uuid}/round-trip-time?startpoint=${ue}&endpoint=${endpoint}")
+
+            global_tags=$(echo "${response}" | jq -r '.global_tags | to_entries | map("\(.key)=\(.value|tostring)") | join(",")')
+            echo "Global tags: ${global_tags}"
+
+            local_tags=$(echo "${response}" | jq -r '.local_tags | to_entries | map("\(.key)=\(.value|tostring)") | join(",")')
+            echo "Local tags: ${local_tags}"
+
+            fields=$(echo "${response}" | jq -r '. | "\(.round_trip_time) \(.response_time) \(.total_forward_latency)"')
+            read rtt service_delay network_delay <<< "${fields}"
+
+            echo "Round-trip-time: ${rtt}"
+            echo "Service delay: ${service_delay}"
+            echo "Network latency ${network_delay}"
+
+            measurement_line="${results_measurement},${global_tags},${local_tags} round_trip_time=${rtt},service_delay=${service_delay},network_delay=${network_delay} ${timestamp}"
+            echo "Measurement line: ${measurement_line}"
+            response=$(curl -si -X POST "http://${CLMC_IP}/influxdb/write?db=${db_name}" --data-binary "${measurement_line}")
+            echo "InfluxDB response: ${response}"
+
+        done
+    done
+
+    echo "Deleting temporal graph..."
+    response=$(curl -s -X DELETE "http://${CLMC_IP}/clmc-service/graph/temporal/${graph_uuid}")
+    echo "${response}"
+
+    echo "Sleeping ${query_period} seconds"
+    sleep $((${query_period}-1))
+
+done
\ No newline at end of file
diff --git a/scripts/clmc-service/install-clmc-service.sh b/scripts/clmc-service/install-clmc-service.sh
index a3755cd4ce86d7147381b0d5cfae188b41f03324..83cefd1fc069ccbcd0ec0f4c47a7501ff1bb22e2 100755
--- a/scripts/clmc-service/install-clmc-service.sh
+++ b/scripts/clmc-service/install-clmc-service.sh
@@ -35,7 +35,7 @@ sudo -u postgres bash -c "psql -c \"GRANT ALL PRIVILEGES ON DATABASE \"whoamidb\
 
 # install virtualenvwrapper to manage python environments - and check
 echo "----> Installing Python3, Pip3 and curl"
-apt-get install -y python3 python3-pip curl
+apt-get install -y python3 python3-pip curl jq
 update-alternatives --install /usr/bin/python python /usr/bin/python3 10
 
 echo "----> Installing virtualenv and wrapper"
@@ -126,6 +126,7 @@ start_script_file="/opt/flame/clmc/start.sh"
 echo "#!/bin/bash" > $start_script_file
 echo "export WORKON_HOME=${HOME}/.virtualenvs" >> $start_script_file
 echo "export SFEMC_FQDN=${SFEMC_FQDN}" >> $start_script_file
+echo "export SDN_CONTROLLER_IP=${SDN_CONTROLLER_IP}" >> $start_script_file
 echo "source /usr/local/bin/virtualenvwrapper.sh" >> $start_script_file
 echo "workon CLMC" >> $start_script_file
 echo "pserve ${REPO_ROOT}/src/service/production.ini &" >> $start_script_file
@@ -160,3 +161,6 @@ done
 apt-get install nginx -y
 cp ${REPO_ROOT}/scripts/clmc-service/nginx.conf /etc/nginx/nginx.conf
 systemctl restart nginx  # nginx is already started on installation, to read the new conf it needs to be restarted
+
+# copy the graph pipeline script to a location on the system PATH
+cp ${REPO_ROOT}/scripts/clmc-service/graph-pipeline.sh /usr/local/bin/
\ No newline at end of file
diff --git a/scripts/clmc-service/install-tick-stack.sh b/scripts/clmc-service/install-tick-stack.sh
index 5896abb920f5eb94704a06e1439a8112e24c6468..b6d63b1089125012dbb2cc708c2060a0856a1275 100755
--- a/scripts/clmc-service/install-tick-stack.sh
+++ b/scripts/clmc-service/install-tick-stack.sh
@@ -26,8 +26,8 @@
 
 echo "----> Installing Tick Stack"
 # Define tickstack software versions
-INFLUX_VERSION=1.5.2
-INFLUX_CHECKSUM=42fede7b497bdf30d4eb5138db218d1add986fca4fce4a8bcd9c7d6dabaf572a
+INFLUX_VERSION=1.6.5
+INFLUX_CHECKSUM=9cc44dbf2bc9dece249a89ef433e564074b9535bab714234bdba522c03438a3c
 
 KAPACITOR_VERSION=1.4.1
 KAPACITOR_CHECKSUM=eea9b215f241906570eafe3857e1d4c5
diff --git a/scripts/test/fixture.sh b/scripts/test/fixture.sh
index 2cfbae5fdf77a33fde8360058158bee569d14140..dc20be92e0b945cde9fed1c3cec247ebc2645979 100755
--- a/scripts/test/fixture.sh
+++ b/scripts/test/fixture.sh
@@ -77,7 +77,7 @@ create() {
         if [ ${service_name} == "clmc-service" ]; then
             cmd="${target_root}/scripts/clmc-service/install.sh"
             echo "Provisioning command ${cmd}"
-            lxc exec ${service_name} --env REPO_ROOT=${target_root} --env SFEMC_FQDN="sfemc.localhost" --env NETWORK_DEPENDENCY="network.target"-- ${cmd}
+            lxc exec ${service_name} --env REPO_ROOT=${target_root} --env SFEMC_FQDN="sfemc.localhost" --env SDN_CONTROLLER_IP="127.0.0.1" --env NETWORK_DEPENDENCY="network.target"-- ${cmd}
             exit_code=$?
             if [ $exit_code != 0 ]; then
                 echo "clmc-service installation failed with exit code ${exit_code}"
diff --git a/src/service/clmcservice/__init__.py b/src/service/clmcservice/__init__.py
index 3d56991b194b5c23f742f7205d35fa003fa60489..6b5c6e48116c354697b98c227f0c0e3903791205 100644
--- a/src/service/clmcservice/__init__.py
+++ b/src/service/clmcservice/__init__.py
@@ -24,7 +24,6 @@
 
 
 # Python standard libs
-from json import load
 from os.path import dirname, abspath
 import os
 
@@ -49,14 +48,14 @@ def main(global_config, **settings):
     Base.metadata.bind = engine  # bind the engine to the Base class metadata
 
     settings['sfemc_fqdn'] = os.environ['SFEMC_FQDN']  # read the SFEMC FQDN from the OS environment
+    settings['sfemc_port'] = int(os.environ.get('SFEMC_PORT', 8080))  # read the SFEMC port number from the OS environment, if not set use 8080 as default
+    settings['sdn_controller_ip'] = os.environ['SDN_CONTROLLER_IP']  # read the SDN controller IP address from the OS environment
+    settings['sdn_controller_port'] = int(os.environ.get('SDN_CONTROLLER_PORT', 8080))  # read the SDN controller port number from the OS environment, if not set use 8080 as default
 
     settings['influx_port'] = int(settings['influx_port'])  # the influx port setting must be converted to integer instead of a string
     settings['kapacitor_port'] = int(settings['kapacitor_port'])  # the kapacitor port setting must be converted to integer instead of a string
 
-    network_config_file_path = settings["network_configuration_path"]
-    with open(network_config_file_path) as f:
-        network = load(f)
-        settings["network_bandwidth"] = network["bandwidth"]
+    settings["network_bandwidth"] = int(settings["network_bandwidth"])  # TODO currently assumed fixed bandwidth across all links
 
     config = Configurator(settings=settings)
 
@@ -68,8 +67,12 @@ def main(global_config, **settings):
 
     # add routes of the GRAPH API
     config.add_route('graph_build', '/graph/temporal')
-    config.add_route('graph_manage', '/graph/temporal/{graph_id}')
+    config.add_route('temporal_graph_manage', '/graph/temporal/{graph_id}')
+    config.add_route('full_graph_manage', '/graph/static/{sfc_id}')
     config.add_route('graph_algorithms_rtt', '/graph/temporal/{graph_id}/round-trip-time')
+    config.add_route('graph_network_topology', '/graph/network')
+    config.add_route('graph_execute_pipeline', '/graph/monitor')
+    config.add_route('graph_manage_pipeline', '/graph/monitor/{request_id}')
 
     # add routes of the Alerts Configuration API
     config.add_route('alerts_configuration', '/alerts')
diff --git a/src/service/clmcservice/alertsapi/tests.py b/src/service/clmcservice/alertsapi/tests.py
index 5c5b4fa7a9bbfd196a1b3b32bd3d6e7a7bbc46a8..8fd2ac04547db1eb5876df0f6d4afd44bdfb05e2 100644
--- a/src/service/clmcservice/alertsapi/tests.py
+++ b/src/service/clmcservice/alertsapi/tests.py
@@ -62,7 +62,7 @@ class TestAlertsConfigurationAPI(object):
         """
 
         self.registry = testing.setUp()
-        self.registry.add_settings({"kapacitor_host": "localhost", "kapacitor_port": 9092, "sfemc_fqdn": "sfemc.localhost"})
+        self.registry.add_settings({"kapacitor_host": "localhost", "kapacitor_port": 9092, "sfemc_fqdn": "sfemc.localhost", "sfemc_port": 8081})
 
         yield
 
@@ -301,7 +301,7 @@ def extract_alert_spec_data(alert_spec):
             for handler_url in trigger.trigger_tpl["action"]["implementation"]:
 
                 if handler_url == "flame_sfemc":
-                    handler_url = "http://sfemc.localhost:8080/sfemc/event/{0}/{1}/{2}".format(sfc, policy_id, "trigger_id_{0}".format(version))
+                    handler_url = "http://sfemc.localhost:8081/sfemc/event/{0}/{1}/{2}".format(sfc, policy_id, "trigger_id_{0}".format(version))
 
                 handler_id = "{0}\n{1}\n{2}\n{3}\n{4}".format(sfc, sfc_instance, policy_id, trigger_id, handler_url)
                 handler_id = AlertsConfigurationAPI.get_hash(handler_id)
diff --git a/src/service/clmcservice/alertsapi/views.py b/src/service/clmcservice/alertsapi/views.py
index 20892c396e5827ccbc413b553a5f9f1a3275ccb9..d794e656293407823bf9bf64668e32c0192b6d25 100644
--- a/src/service/clmcservice/alertsapi/views.py
+++ b/src/service/clmcservice/alertsapi/views.py
@@ -326,7 +326,8 @@ class AlertsConfigurationAPI(object):
             # check for flame_sfemc entry, if found replace with sfemc FQDN
             if http_handler_url == SFEMC:
                 sfemc_fqdn = self.request.registry.settings['sfemc_fqdn']
-                http_handler_url = "http://{0}:8080/sfemc/event/{1}/{2}/{3}".format(sfemc_fqdn, sfc, policy_id, trigger_id)
+                sfemc_port = self.request.registry.settings['sfemc_port']
+                http_handler_url = "http://{0}:{1}/sfemc/event/{2}/{3}/{4}".format(sfemc_fqdn, sfemc_port, sfc, policy_id, trigger_id)
 
             handler_id = "{0}\n{1}\n{2}\n{3}\n{4}".format(sfc, sfc_i, policy_id, event_id, http_handler_url)
             handler_id = self.get_hash(handler_id)
diff --git a/src/service/clmcservice/graphapi/conftest.py b/src/service/clmcservice/graphapi/conftest.py
index 03e3f6d72df618f05cdeb3578c1fa155a1a090eb..f4017e807f8211183377524b2ce795590ccdbf5c 100644
--- a/src/service/clmcservice/graphapi/conftest.py
+++ b/src/service/clmcservice/graphapi/conftest.py
@@ -24,99 +24,68 @@
 
 import pytest
 from influxdb import InfluxDBClient
-from clmcservice.generate_network_measurements import report_network_measurements
 from py2neo import Graph
+from clmcservice.graphapi.utilities import build_network_graph
+
+
+# static network configuration data used for testing cases, latencies reported in milliseconds
+links = [
+    {
+        "src-switch": "dpid1",
+        "dst-switch": "dpid2",
+        "latency": 7.5 * 1000
+    },
+    {
+        "src-switch": "dpid1",
+        "dst-switch": "dpid3",
+        "latency": 9 * 1000
+    },
+    {
+        "src-switch": "dpid1",
+        "dst-switch": "dpid5",
+        "latency": 15 * 1000
+    },
+    {
+        "src-switch": "dpid2",
+        "dst-switch": "dpid4",
+        "latency": 10 * 1000
+    },
+    {
+        "src-switch": "dpid3",
+        "dst-switch": "dpid4",
+        "latency": 12.5 * 1000
+    },
+    {
+        "src-switch": "dpid5",
+        "dst-switch": "dpid6",
+        "latency": 4.5 * 1000
+    }
+]
+
+
+switches = {
+    "dpid1": "127.0.0.1",
+    "dpid2": "127.0.0.2",
+    "dpid3": "127.0.0.3",
+    "dpid4": "127.0.0.4",
+    "dpid5": "127.0.0.5",
+    "dpid6": "127.0.0.6"
+}
 
 
-# static network configuration data used for testing cases
-network_config = {
-    "bandwidth": 104857600,
-    "links": [
-        {
-            "source": "DC1",
-            "target": "DC2",
-            "min_response_time": 10,
-            "max_response_time": 20,
-            "avg_response_time": 15
-        },
-        {
-            "source": "DC2",
-            "target": "DC1",
-            "min_response_time": 16,
-            "max_response_time": 28,
-            "avg_response_time": 22
-        },
-        {
-            "source": "DC1",
-            "target": "DC3",
-            "min_response_time": 17,
-            "max_response_time": 19,
-            "avg_response_time": 18
-        },
-        {
-            "source": "DC3",
-            "target": "DC1",
-            "min_response_time": 15,
-            "max_response_time": 25,
-            "avg_response_time": 20
-        },
-        {
-            "source": "DC1",
-            "target": "DC5",
-            "min_response_time": 27,
-            "max_response_time": 33,
-            "avg_response_time": 30
-        },
-        {
-            "source": "DC5",
-            "target": "DC1",
-            "min_response_time": 10,
-            "max_response_time": 42,
-            "avg_response_time": 26
-        },
-        {
-            "source": "DC2",
-            "target": "DC4",
-            "min_response_time": 11,
-            "max_response_time": 29,
-            "avg_response_time": 20
-        },
-        {
-            "source": "DC4",
-            "target": "DC2",
-            "min_response_time": 12,
-            "max_response_time": 40,
-            "avg_response_time": 26
-        },
-        {
-            "source": "DC3",
-            "target": "DC4",
-            "min_response_time": 23,
-            "max_response_time": 27,
-            "avg_response_time": 25
-        },
-        {
-            "source": "DC4",
-            "target": "DC3",
-            "min_response_time": 12,
-            "max_response_time": 18,
-            "avg_response_time": 15
-        },
-        {
-            "source": "DC5",
-            "target": "DC6",
-            "min_response_time": 3,
-            "max_response_time": 15,
-            "avg_response_time": 9
-        },
-        {
-            "source": "DC6",
-            "target": "DC5",
-            "min_response_time": 11,
-            "max_response_time": 11,
-            "avg_response_time": 11
-        },
-    ]
+clusters = {
+    "127.0.0.1": "DC1",
+    "127.0.0.2": "DC2",
+    "127.0.0.3": "DC3",
+    "127.0.0.4": "DC4",
+    "127.0.0.5": "DC5",
+    "127.0.0.6": "DC6"
+}
+
+ues = {
+    "127.0.0.2": "ue2",
+    "127.0.0.3": "ue3",
+    "127.0.0.6": "ue6"
 }
 
 
@@ -128,7 +97,7 @@ def db_testing_data():
     :return: pair of time stamps - the from-to range of the generated influx test data, test database name and the graph db client object
     """
 
-    global network_config
+    global links, switches, clusters, ues
 
     test_sfc_name = "test_sfc"
     test_sfc_instance_1_name = "test_sfc_premium"
@@ -142,9 +111,8 @@ def db_testing_data():
 
     # create the physical infrastructure subgraph
     dbs = influx.get_list_database()
-    if "CLMCMetrics" not in dbs:
-        influx.create_database("CLMCMetrics")
-    report_network_measurements("localhost", "CLMCMetrics", network_config, "localhost", "admin")
+    switch_count, cluster_count, ues_count = build_network_graph(graph, switches, links, clusters, ues)
+    assert switch_count == 6 and cluster_count == 6 and ues_count == 3, "Network graph build failure"
 
     # check if exists ( if so, clear ) or create the test DB in influx
     if test_db_name in dbs:
diff --git a/src/service/clmcservice/graphapi/tests.py b/src/service/clmcservice/graphapi/tests.py
index 60ee4c1809b165a8166dc64cada16071d704b0e8..0b07a87ff40dc7c738f0008cf135560e5327df1f 100644
--- a/src/service/clmcservice/graphapi/tests.py
+++ b/src/service/clmcservice/graphapi/tests.py
@@ -22,15 +22,14 @@
 //      Created for Project :   FLAME
 """
 
-from json import dumps
+from json import dumps, loads
+from signal import SIGKILL
+from unittest.mock import patch, Mock, MagicMock, PropertyMock
 import pytest
 from pyramid import testing
+from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound, HTTPInternalServerError
 from clmcservice.graphapi.views import GraphAPI
-from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound
-
-
-graph_1_id = None
-graph_2_id = None
+from clmcservice.models import MonitoringProcess
 
 
 class TestGraphAPI(object):
@@ -45,38 +44,47 @@ class TestGraphAPI(object):
         """
 
         self.registry = testing.setUp()
-        self.registry.add_settings({"neo4j_host": "localhost", "neo4j_password": "admin", "influx_host": "localhost", "influx_port": 8086, "network_bandwidth": 104857600})
+        self.registry.add_settings({"neo4j_host": "localhost", "neo4j_password": "admin", "influx_host": "localhost", "influx_port": 8086, "network_bandwidth": 104857600,
+                                    "network_ues_path": "/opt/clmc/src/service/resources/GraphAPI/network_ues.json"})
 
         yield
 
         testing.tearDown()
 
-    @pytest.mark.parametrize("body, from_timestamp, to_timestamp, error_msg", [
-        (None, None, None, "A bad request error must have been raised in case of missing request body."),
-        ('{}', 12341412, 1234897, "A bad request error must have been raised in case of invalid request body."),
-        ('"service_function_chain": "sfc", "service_function_chain_instance": "sfci"}', 12341412, 1234897, "A bad request error must have been raised in case of invalid request body."),
-        ('"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": "{invalid_json}"}', 1528386860, 1528389860, "A bad request error must have been raised in case of invalid request body."),
-        ('"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": ["nginx", "minio"]}', 1528386860, 1528389860, "A bad request error must have been raised in case of invalid request body."),
-        ('"service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
-         1528386860, 1528389860, "A bad request error must have been raised in case of missing service function chain value in the request body"),
-        ('"service_function_chain": "sfc", "service_function_chain_instance": "sfcinstance", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
-         1528386860, 1528389860, "A bad request error must have been raised in case of invalid sfci ID in the request body"),
-        ('"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
-         "not a timestamp", "not a timestamp", "A bad request error must have been raised in case of invalid URL parameters."),
-        ('"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
-         None, "not a timestamp", "A bad request error must have been raised in case of invalid URL parameters."),
-        ('"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
-         2131212, None, "A bad request error must have been raised in case of invalid URL parameters."),
-        ('"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
-         2131212, 2131212, "A bad request error must have been raised in case of a non-existing database."),
+    @pytest.mark.parametrize("body, expected_error, error_msg", [
+        (None, "Configuration must be a JSON object.", "A bad request error must have been raised in case of missing request body."),
+        ('{"from": 12341412, "to": 1234897}', "Invalid JSON query document.", "A bad request error must have been raised in case of invalid request body."),
+        ('{"service_function_chain": "sfc", "service_function_chain_instance": "sfci", "from": 12341412, "to": 1234897}',
+         "Invalid JSON query document.", "A bad request error must have been raised in case of invalid request body."),
+        ('{"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"invalid_json"}}',
+         "Configuration must be a JSON object.", "A bad request error must have been raised in case of invalid JSON body."),
+        ('{"from": 1528386860, "to":1528389860, "service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": ["nginx", "minio"]}',
+         "The service functions description should be represented with a JSON object.", "A bad request error must have been raised in case of invalid request body."),
+        ('{"from": 1528386860, "to":1528389860, "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         "Invalid JSON query document.", "A bad request error must have been raised in case of missing service function chain value in the request body"),
+        ('{"from": 1528386860, "to":1628389860, "service_function_chain": "sfc", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         "Invalid JSON query document.", "A bad request error must have been raised in case of missing sfci ID in the request body"),
+        ('{"from": "invalid", "to": "invalid", "service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         "'from' parameter must be a timestamp integer", "A bad request error must have been raised in case of invalid timestamp parameters."),
+        ('{"from": 123, "to": "invalid", "service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+        "'to' parameter must be a timestamp integer", "A bad request error must have been raised in case of invalid timestamp parameters."),
+        ('{"from": -10, "to": 20, "service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         "'from' parameter must be a positive timestamp integer or 0", "A bad request error must have been raised in case of negative timestamp parameters."),
+        ('{"from": 20, "to": 10, "service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         "'to' parameter timestamp must be greater than 'from' parameter timestamp", "A bad request error must have been raised in case of negative difference between 'to' and 'from' timestamp parameters."),
+        ('{"to": 154342324, "service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         "Invalid JSON query document.", "A bad request error must have been raised in case of missing 'from' timestamp parameters."),
+        ('{"from": 21321232, "service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         "Invalid JSON query document.", "A bad request error must have been raised in case of missing 'to' timestamp parameters."),
+        ('{"from": 1528386860, "to":1628389860, "service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         "Database for service function chain sfc not found.", "A bad request error must have been raised in case of a non-existing database."),
     ])
-    def test_build_error_handling(self, body, from_timestamp, to_timestamp, error_msg):
+    def test_build_error_handling(self, body, expected_error, error_msg):
         """
         Tests the error handling of the graph build API endpoint by passing erroneous input and confirming an HTTPBadRequest was returned.
 
         :param body: body of the request to test
-        :param from_timestamp: the 'from' URL param
-        :param to_timestamp: the 'to' URL param
+        :param expected_error: the expected error to be returned in the BadRequest response
         :param error_msg: the error message to pass in case of an error not being properly handled by the API endpoint (in other words, a test failure)
         """
 
@@ -84,67 +92,50 @@ class TestGraphAPI(object):
         if body is not None:
             request.body = body
         request.body = request.body.encode(request.charset)
-        if from_timestamp is not None:
-            request.params["from"] = from_timestamp
-        if to_timestamp is not None:
-            request.params["to"] = to_timestamp
+
         error_raised = False
         try:
             GraphAPI(request).build_temporal_graph()
-        except HTTPBadRequest:
+        except HTTPBadRequest as e:
+            print(e)
+            assert expected_error in str(e)
             error_raised = True
         assert error_raised, error_msg
 
-    def test_build(self, db_testing_data):
+    @patch('clmcservice.graphapi.views.uuid4')
+    def test_build(self, uuid_mock, db_testing_data):
         """
         Tests the graph build API endpoint - it makes 2 API calls and checks that the expected graph was created (the influx data that's being used is reported to InfluxDB in the conftest file)
 
-        :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data, test database name and the graph db client object (this is a fixture from conftest)
+        :param uuid_mock: mock object to mock the behaviour of the uuid4 function
+        :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data and the graph db client object (this is a fixture from conftest)
         """
 
-        global graph_1_id, graph_2_id  # these variables are used to store the ID of the graphs that were created during the execution of this test method; they are reused later when testing the delete method
-
         from_timestamp, to_timestamp, graph_db = db_testing_data
 
-        dc_nodes = set([node["name"] for node in graph_db.nodes.match("ComputeNode")])
+        ue_nodes = set([node["name"] for node in graph_db.nodes.match("UserEquipment")])
+        assert ue_nodes == set("ue" + str(i) for i in [2, 3, 6]), "UE nodes must have been created by the db_testing_data fixture"
+
+        dc_nodes = set([node["name"] for node in graph_db.nodes.match("Cluster")])
-    assert dc_nodes == set("DC" + str(i) for i in range(1, 7)), "Compute nodes must have been created by the db_testing_data fixture"
+    assert dc_nodes == set("DC" + str(i) for i in range(1, 7)), "Cluster nodes must have been created by the db_testing_data fixture"
 
-        # test with invalid URL parameters naming
-        service_functions = dict(nginx={"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)",
-                                        "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"},
-                                 minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
-                                        "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"},
-                                 apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)",
-                                         "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"})
-        body = dumps(dict(service_function_chain="sfc", service_function_chain_instance="sfc_1", service_functions=service_functions))
-        request = testing.DummyRequest()
-        request.params["from_timestamp"] = 12341412
-        request.params["to_timestamp"] = 12341412
-        request.body = body.encode(request.charset)
-        error_raised = False
-        try:
-            GraphAPI(request).build_temporal_graph()
-        except HTTPBadRequest:
-            error_raised = True
-        assert error_raised, "A bad request error must have been raised in case of invalid URL parameters."
+        switch_nodes = set([node["name"] for node in graph_db.nodes.match("Switch")])
+        assert switch_nodes == set("127.0.0." + str(i) for i in range(1, 7)), "Switch nodes must have been created by the db_testing_data fixture"
 
         # Create a valid build request and send it to the API endpoint
-        service_functions = dict(nginx={"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)",
-                                        "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"},
-                                 minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
-                                        "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"})
-        build_json_body = dict(service_function_chain="test_sfc", service_function_chain_instance="test_sfc_premium", service_functions=service_functions)
-        body = dumps(build_json_body)
-        request = testing.DummyRequest()
-        request.params["from"] = from_timestamp
-        request.params["to"] = to_timestamp
-        request.body = body.encode(request.charset)
-        response = GraphAPI(request).build_temporal_graph()
+        uuid_mock.return_value = "graph_test_build_uuid1"
+        responses = graph_generator(from_timestamp, to_timestamp)  # generates 2 graphs by sending requests to the build API and yielding back the response for each request
+        response = next(responses)
+
         graph_subresponse = response.pop("graph")
-        assert response == build_json_body, "Response must contain the request body"
-        assert graph_subresponse.get("uuid") is not None, "Request UUID must be attached to the response."
+
+        assert response == {"database": "test_sfc"}, "Response must contain the database name"
+
+        assert graph_subresponse["uuid"] == uuid_mock.return_value, "Request UUID must be attached to the response."
+        assert graph_subresponse["time_range"]["from"] == from_timestamp * 10**9  # timestamp returned in nanoseconds
+        assert graph_subresponse["time_range"]["to"] == to_timestamp * 10**9  # timestamp returned in nanoseconds
+        assert set(graph_subresponse["endpoints"]) == {"minio_1_ep1", "nginx_1_ep1", "nginx_1_ep2"}, "Wrong list of new endpoints was returned by the build request"
         request_id = graph_subresponse["uuid"]
-        graph_1_id = request_id
 
         # check that the appropriate nodes have been created
         sfp_names = set([node["name"] for node in graph_db.nodes.match("ServiceFunctionPackage")])
@@ -164,9 +155,9 @@ class TestGraphAPI(object):
         # check the appropriate edges have been created
         self.check_exist_relationship(
             (
-                ("minio_1_ep1", "Endpoint", "DC4", "ComputeNode", "hostedBy"),
-                ("nginx_1_ep1", "Endpoint", "DC4", "ComputeNode", "hostedBy"),
-                ("nginx_1_ep2", "Endpoint", "DC6", "ComputeNode", "hostedBy"),
+                ("minio_1_ep1", "Endpoint", "DC4", "Cluster", "hostedBy"),
+                ("nginx_1_ep1", "Endpoint", "DC4", "Cluster", "hostedBy"),
+                ("nginx_1_ep2", "Endpoint", "DC6", "Cluster", "hostedBy"),
                 ("minio_1", "ServiceFunction", "minio_1_ep1", "Endpoint", "realisedBy"),
                 ("nginx_1", "ServiceFunction", "nginx_1_ep1", "Endpoint", "realisedBy"),
                 ("nginx_1", "ServiceFunction", "nginx_1_ep2", "Endpoint", "realisedBy"),
@@ -188,22 +179,18 @@ class TestGraphAPI(object):
             assert endpoint_node["response_size"] == pytest.approx(response_size, 1), "Wrong response size attribute of endpoint node"
 
         # send a new request for a new service function chain instance and check the new subgraph has been created
-        service_functions = dict(minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
-                                        "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"},
-                                 apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)",
-                                         "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"})
-        build_json_body = dict(service_function_chain="test_sfc", service_function_chain_instance="test_sfc_non_premium", service_functions=service_functions)
-        body = dumps(build_json_body)
-        request = testing.DummyRequest()
-        request.params["from"] = from_timestamp
-        request.params["to"] = to_timestamp
-        request.body = body.encode(request.charset)
-        response = GraphAPI(request).build_temporal_graph()
+        uuid_mock.return_value = "graph_test_build_uuid2"
+        response = next(responses)
+
         graph_subresponse = response.pop("graph")
-        assert response == build_json_body, "Response must contain the request body"
-        assert graph_subresponse.get("uuid") is not None, "Request UUID must be attached to the response."
+
+        assert response == {"database": "test_sfc"}, "Response must contain the database name"
+
+        assert graph_subresponse["uuid"] == uuid_mock.return_value, "Request UUID must be attached to the response."
+        assert graph_subresponse["time_range"]["from"] == from_timestamp * 10**9  # timestamp returned in nanoseconds
+        assert graph_subresponse["time_range"]["to"] == to_timestamp * 10**9  # timestamp returned in nanoseconds
+        assert set(graph_subresponse["endpoints"]) == {"minio_2_ep1", "apache_1_ep1"}, "Wrong list of new endpoints was returned by the build request"
         request_id = graph_subresponse["uuid"]
-        graph_2_id = request_id
 
         # check the new nodes have been created
         assert graph_db.nodes.match("ServiceFunctionPackage", name="apache").first() is not None, "Service function package apache must have been added to the graph"
@@ -223,8 +210,8 @@ class TestGraphAPI(object):
         # check the appropriate edges have been created
         self.check_exist_relationship(
             (
-                ("minio_2_ep1", "Endpoint", "DC5", "ComputeNode", "hostedBy"),
-                ("apache_1_ep1", "Endpoint", "DC5", "ComputeNode", "hostedBy"),
+                ("minio_2_ep1", "Endpoint", "DC5", "Cluster", "hostedBy"),
+                ("apache_1_ep1", "Endpoint", "DC5", "Cluster", "hostedBy"),
                 ("minio_2", "ServiceFunction", "minio_2_ep1", "Endpoint", "realisedBy"),
                 ("apache_1", "ServiceFunction", "apache_1_ep1", "Endpoint", "realisedBy"),
                 ("minio_2", "ServiceFunction", "minio", "ServiceFunctionPackage", "instanceOf"),
@@ -245,17 +232,34 @@ class TestGraphAPI(object):
             assert endpoint_node["request_size"] == pytest.approx(request_size, 1), "Wrong request size attribute of endpoint node"
             assert endpoint_node["response_size"] == pytest.approx(response_size, 1), "Wrong response size attribute of endpoint node"
 
-    def test_delete(self, db_testing_data):
+        request = testing.DummyRequest()
+        request.matchdict["sfc_id"] = "test_sfc"
+        response = GraphAPI(request).delete_full_graph()
+        assert response == {"deleted": 17}
+
+    @patch('clmcservice.graphapi.views.uuid4')
+    def test_delete_temporal(self, uuid_mock, db_testing_data):
         """
-        Tests the delete API endpoint of the Graph API - the test depends on the build test to have been passed successfully so that graph_1_id and graph_2_id have been set
+        Tests the delete API endpoint of the Graph API - the test builds its own temporal graphs through the graph_generator utility function and then deletes them
 
-        :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data, test database name and the graph db client object (this is a fixture from conftest)
+        :param uuid_mock: the mock object used to mimic the behaviour of the uuid.uuid4 function
+        :param db_testing_data: the 'from' and 'to' timestamps of the generated influx test data and the graph db client object (this is a fixture from conftest)
         """
 
-        global graph_1_id, graph_2_id
-
         from_timestamp, to_timestamp, graph_db = db_testing_data
 
+        # build test graphs
+        responses = graph_generator(from_timestamp, to_timestamp)  # generates 2 graphs by sending requests to the build API and yielding back the response for each request
+
+        uuid_mock.return_value = "graph_test_delete_uuid1"
+        graph_1_test_id = uuid_mock.return_value
+        next(responses)  # return value is the response, but we don't need it for this test
+
+        uuid_mock.return_value = "graph_test_delete_uuid2"
+        graph_2_test_id = uuid_mock.return_value
+        next(responses)  # return value is the response, but we don't need it for this test
+
+        # test erroneous behaviour
         request = testing.DummyRequest()
         request.matchdict["graph_id"] = "invalid_graph_id"
         error_raised = False
@@ -265,39 +269,80 @@ class TestGraphAPI(object):
             error_raised = True
         assert error_raised, "HTTP Not Found error must be raised in case of unrecognized subgraph ID"
 
+        assert len(graph_db.nodes.match(uuid=graph_1_test_id)) == 4, "Graph build generator function is broken"
+        assert len(graph_db.nodes.match(uuid=graph_2_test_id)) == 3, "Graph build generator function is broken"
+
         # delete the graph associated with graph_1_id
         request = testing.DummyRequest()
-        request.matchdict["graph_id"] = graph_1_id
+        request.matchdict["graph_id"] = graph_1_test_id
         response = GraphAPI(request).delete_temporal_graph()
-        assert response == {"uuid": graph_1_id, "deleted": 4}, "Incorrect response when deleting temporal graph"
+        assert response == {"deleted": 4}, "Incorrect response when deleting temporal graph"
+        assert len(graph_db.nodes.match(uuid=graph_1_test_id)) == 0, "Delete request is broken"
 
         # delete the graph associated with graph_2_id
         request = testing.DummyRequest()
-        request.matchdict["graph_id"] = graph_2_id
+        request.matchdict["graph_id"] = graph_2_test_id
         response = GraphAPI(request).delete_temporal_graph()
-        assert response == {"uuid": graph_2_id, "deleted": 3}, "Incorrect response when deleting temporal graph"
+        assert response == {"deleted": 3}, "Incorrect response when deleting temporal graph"
+        assert len(graph_db.nodes.match(uuid=graph_2_test_id)) == 0, "Delete request is broken"
 
-        assert len(graph_db.nodes.match("Endpoint")) == 0, "All endpoint nodes should have been deleted"
-        assert set([node["name"] for node in graph_db.nodes.match("ComputeNode")]) == set(["DC" + str(i) for i in range(1, 7)]), "Compute nodes must not be deleted"
+        assert set([node["name"] for node in graph_db.nodes.match("Cluster")]) == set(["DC" + str(i) for i in range(1, 7)]), "Cluster nodes must not be deleted"
+        assert set([node["name"] for node in graph_db.nodes.match("Switch")]) == set(["127.0.0." + str(i) for i in range(1, 7)]), "Switch nodes must not be deleted"
+        assert set([node["name"] for node in graph_db.nodes.match("UserEquipment")]) == set(["ue" + str(i) for i in (2, 3, 6)]), "UE nodes must not be deleted"
+        assert set([node["name"] for node in graph_db.nodes.match("ServiceFunctionEndpoint")]) == set(), "Endpoint nodes must have been deleted"
         assert set([node["name"] for node in graph_db.nodes.match("ServiceFunction")]) == {"nginx_1", "apache_1", "minio_1", "minio_2"}, "Service functions must not be deleted."
         assert set([node["name"] for node in graph_db.nodes.match("ServiceFunctionPackage")]) == {"nginx", "minio", "apache"}, "Service function packages must not be deleted"
         assert set([node["name"] for node in graph_db.nodes.match("ServiceFunctionChainInstance")]) == {"test_sfc_premium", "test_sfc_non_premium"}, "Service function chain instances must not be deleted"
         assert set([node["name"] for node in graph_db.nodes.match("ServiceFunctionChain")]) == {"test_sfc"}, "Service function chains must not be deleted"
 
-    @pytest.mark.parametrize("graph_id, endpoint, compute_node, error_type, error_msg", [
+        request = testing.DummyRequest()
+        request.matchdict["sfc_id"] = "test_sfc"
+        response = GraphAPI(request).delete_full_graph()
+        assert response == {"deleted": 10}
+
+    def test_delete_full(self, db_testing_data):
+        """
+        Tests the functionality to delete the full media service graph starting from a service function chain node down to an endpoint node.
+
+        :param db_testing_data: the 'from' and 'to' timestamps of the generated influx test data and the graph db client object (this is a fixture from conftest)
+
+        :return:
+        """
+
+        from_timestamp, to_timestamp, graph_db = db_testing_data
+
+        responses = graph_generator(from_timestamp, to_timestamp)  # generates 2 graphs by sending requests to the build API and yielding back the response for each request
+        next(responses)
+        next(responses)
+
+        request = testing.DummyRequest()
+        request.matchdict["sfc_id"] = "invalid_test_sfc"
+        error_raised = False
+        try:
+            GraphAPI(request).delete_full_graph()
+        except HTTPNotFound:
+            error_raised = True
+        assert error_raised, "Error must have been raised for invalid SFC identifier"
+
+        request = testing.DummyRequest()
+        request.matchdict["sfc_id"] = "test_sfc"
+        response = GraphAPI(request).delete_full_graph()
+        assert response == {"deleted": 17}
+
+    @pytest.mark.parametrize("graph_id, endpoint, startpoint, error_type, error_msg", [
         ('e8cd4768-47dd-48cd-9c74-7f8926ddbad8', None, None, HTTPBadRequest, "HTTP Bad Request must be thrown in case of missing or invalid url parameters"),
         ('e8cd4768-47dd-48cd-9c74-7f8926ddbad8', None, "nginx", HTTPBadRequest, "HTTP Bad Request must be thrown in case of missing or invalid url parameters"),
         ('e8cd4768-47dd-48cd-9c74-7f8926ddbad8', "nginx_1_ep1", None, HTTPBadRequest, "HTTP Bad Request must be thrown in case of missing or invalid url parameters"),
         ('random-uuid', "nginx_1_ep1", "nginx", HTTPNotFound, "HTTP Not Found error must be thrown for an endpoint node with incorrect request ID"),
         ('random-uuid', "minio_1_ep1", "minio", HTTPNotFound, "HTTP Not Found error must be thrown for an endpoint node with incorrect request ID"),
     ])
-    def test_rtt_error_handling(self, graph_id, endpoint, compute_node, error_type, error_msg):
+    def test_rtt_error_handling(self, graph_id, endpoint, startpoint, error_type, error_msg):
         """
         Tests the error handling of the graph round trip time API endpoint - achieved by sending erroneous input in the request and verifying the appropriate error type has been returned.
 
         :param graph_id: the UUID of the subgraph
         :param endpoint: endpoint ID
-        :param compute_node: compute node ID
+        :param startpoint: the start node ID
         :param error_type: error type to expect as a response
         :param error_msg: error message in case of a test failure
         """
@@ -306,8 +351,8 @@ class TestGraphAPI(object):
         request.matchdict["graph_id"] = graph_id
         if endpoint is not None:
             request.params["endpoint"] = endpoint
-        if compute_node is not None:
-            request.params["compute_node"] = compute_node
+        if startpoint is not None:
+            request.params["startpoint"] = startpoint
         error_raised = False
         try:
             GraphAPI(request).run_rtt_query()
@@ -319,29 +364,17 @@ class TestGraphAPI(object):
         """
         Tests the rtt API endpoint of the Graph API.
 
-        :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data, test database name and the graph db client object (this is a fixture from conftest)
+        :param db_testing_data: the 'from' and 'to' timestamps of the generated influx test data and the graph db client object (this is a fixture from conftest)
         """
 
         from_timestamp, to_timestamp, graph_db = db_testing_data
 
         # create a graph to use for RTT test by using the build API endpoint
-        service_functions = dict(nginx={"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)",
-                                        "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"},
-                                 minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
-                                        "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"})
-        build_json_body = dict(service_function_chain="test_sfc", service_function_chain_instance="test_sfc_premium", service_functions=service_functions)
-        body = dumps(build_json_body)
-        request = testing.DummyRequest()
-        request.params["from"] = from_timestamp
-        request.params["to"] = to_timestamp
-        request.body = body.encode(request.charset)
-        response = GraphAPI(request).build_temporal_graph()
-        graph_subresponse = response.pop("graph")
-        assert response == build_json_body, "Response must contain the request body"
-        assert graph_subresponse.get("uuid") is not None, "Request UUID must be attached to the response."
-        request_id = graph_subresponse["uuid"]
+        responses = graph_generator(from_timestamp, to_timestamp)  # generates 2 graphs by sending requests to the build API and yielding back the response for each request
+        response = next(responses)
+        request_id = response["graph"]["uuid"]
 
-        # test some more error case handling of the RTT API endpoint
+        # test more error case handling of the RTT API endpoint
         request = testing.DummyRequest()
         request.matchdict["graph_id"] = request_id
         request.params["endpoint"] = "nginx_1_ep1"
@@ -356,7 +389,7 @@ class TestGraphAPI(object):
         request = testing.DummyRequest()
         request.matchdict["graph_id"] = request_id
         request.params["endpoint"] = "nginx_1_ep1"
-        request.params["compute_node"] = "DC0"
+        request.params["startpoint"] = "DC0"
         error_raised = False
         try:
             GraphAPI(request).run_rtt_query()
@@ -367,7 +400,7 @@ class TestGraphAPI(object):
         request = testing.DummyRequest()
         request.matchdict["graph_id"] = request_id
         request.params["endpoint"] = "apache_1_ep1"
-        request.params["compute_node"] = "DC1"
+        request.params["startpoint"] = "DC1"
         error_raised = False
         try:
             GraphAPI(request).run_rtt_query()
@@ -375,58 +408,231 @@ class TestGraphAPI(object):
             error_raised = True
         assert error_raised, "HTTP Not Found error must be thrown for a non existing endpoint"
 
+        # test valid requests
         # go through the set of input/output (expected) parameters and assert actual results match with expected ones
-        for dc, endpoint, forward_latencies, reverse_latencies, response_time, request_size, response_size, rtt, global_tags in (
+        for startpoint, endpoint, forward_latencies, reverse_latencies, response_time, request_size, response_size, rtt, global_tags in (
             ("DC6", "nginx_1_ep2", [], [], 22.2, 35600, 6420, 22.2, {"flame_location": "DC6", "flame_sfe": "nginx_1_ep2", "flame_server": "DC6", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_premium", "flame_sfp": "nginx", "flame_sf": "nginx_1"}),
-            ("DC2", "nginx_1_ep2", [11, 15, 4.5], [5.5, 13, 7.5], 22.2, 35600, 6420, 78, {"flame_location": "DC6", "flame_sfe": "nginx_1_ep2", "flame_server": "DC6", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_premium", "flame_sfp": "nginx", "flame_sf": "nginx_1"}),
-            ("DC3", "nginx_1_ep1", [12.5], [7.5], 18.2, 2260, 9660, 38, {"flame_location": "DC4", "flame_sfe": "nginx_1_ep1", "flame_server": "DC4", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_premium", "flame_sfp": "nginx", "flame_sf": "nginx_1"})
+            ("127.0.0.6", "nginx_1_ep2", [0], [0], 22.2, 35600, 6420, 22.2, {"flame_location": "DC6", "flame_sfe": "nginx_1_ep2", "flame_server": "DC6", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_premium", "flame_sfp": "nginx", "flame_sf": "nginx_1"}),
+            ("ue6", "nginx_1_ep2", [0, 0], [0, 0], 22.2, 35600, 6420, 22.2, {"flame_location": "DC6", "flame_sfe": "nginx_1_ep2", "flame_server": "DC6", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_premium", "flame_sfp": "nginx", "flame_sf": "nginx_1"}),
+            ("DC2", "nginx_1_ep2", [0, 7.5, 15, 4.5, 0], [0, 4.5, 15, 7.5, 0], 22.2, 35600, 6420, 78, {"flame_location": "DC6", "flame_sfe": "nginx_1_ep2", "flame_server": "DC6", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_premium", "flame_sfp": "nginx", "flame_sf": "nginx_1"}),
+            ("127.0.0.2", "nginx_1_ep2", [7.5, 15, 4.5, 0], [0, 4.5, 15, 7.5], 22.2, 35600, 6420, 78, {"flame_location": "DC6", "flame_sfe": "nginx_1_ep2", "flame_server": "DC6", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_premium", "flame_sfp": "nginx", "flame_sf": "nginx_1"}),
+            ("DC3", "nginx_1_ep1", [0, 12.5, 0], [0, 12.5, 0], 18.2, 2260, 9660, 38, {"flame_location": "DC4", "flame_sfe": "nginx_1_ep1", "flame_server": "DC4", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_premium", "flame_sfp": "nginx", "flame_sf": "nginx_1"}),
+            ("127.0.0.3", "nginx_1_ep1", [12.5, 0], [0, 12.5], 18.2, 2260, 9660, 38, {"flame_location": "DC4", "flame_sfe": "nginx_1_ep1", "flame_server": "DC4", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_premium", "flame_sfp": "nginx", "flame_sf": "nginx_1"}),
+            ("ue3", "nginx_1_ep1", [0, 12.5, 0], [0, 12.5, 0], 18.2, 2260, 9660, 38, {"flame_location": "DC4", "flame_sfe": "nginx_1_ep1", "flame_server": "DC4", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_premium", "flame_sfp": "nginx", "flame_sf": "nginx_1"})
         ):
             request = testing.DummyRequest()
             request.matchdict["graph_id"] = request_id
             request.params["endpoint"] = endpoint
-            request.params["compute_node"] = dc
+            request.params["startpoint"] = startpoint
             response = GraphAPI(request).run_rtt_query()
             # approximation is used to avoid long float numbers retrieved from influx, the test case ensures the results are different enough so that approximation of +-1 is good enough for testing
             assert response.pop("round_trip_time") == pytest.approx(rtt, 1), "Incorrect RTT response"
             assert response == {"forward_latencies": forward_latencies, "reverse_latencies": reverse_latencies, "total_forward_latency": sum(forward_latencies), "total_reverse_latency": sum(reverse_latencies),
-                                "bandwidth": 104857600, "response_time": response_time, "global_tags": global_tags,
+                                "bandwidth": 104857600, "response_time": response_time, "local_tags": {"traffic_source": startpoint}, "global_tags": global_tags,
                                 "request_size": request_size, "response_size": response_size}, "Incorrect RTT response"
 
         # send a new request for a new service function chain to create a second subgraph to test
-        service_functions = dict(minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
-                                        "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"},
-                                 apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)",
-                                         "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"})
-        build_json_body = dict(service_function_chain="test_sfc", service_function_chain_instance="test_sfc_non_premium", service_functions=service_functions)
-        body = dumps(build_json_body)
-        request = testing.DummyRequest()
-        request.params["from"] = from_timestamp
-        request.params["to"] = to_timestamp
-        request.body = body.encode(request.charset)
-        response = GraphAPI(request).build_temporal_graph()
-        graph_subresponse = response.pop("graph")
-        assert response == build_json_body, "Response must contain the request body"
-        assert graph_subresponse.get("uuid") is not None, "Request UUID must be attached to the response."
-        request_id = graph_subresponse["uuid"]
+        response = next(responses)
+        request_id = response["graph"]["uuid"]
 
+        # test valid requests
         # go through the set of input/output (expected) parameters and assert actual results match with expected ones
-        for dc, endpoint, forward_latencies, reverse_latencies, response_time, request_size, response_size, rtt, global_tags in (
+        for startpoint, endpoint, forward_latencies, reverse_latencies, response_time, request_size, response_size, rtt, global_tags in (
             ("DC5", "apache_1_ep1", [], [], 17.6, 1480, 7860, 17.6, {"flame_location": "DC5", "flame_sfe": "apache_1_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "apache", "flame_sf": "apache_1"}),
+            ("127.0.0.5", "apache_1_ep1", [0], [0], 17.6, 1480, 7860, 17.6, {"flame_location": "DC5", "flame_sfe": "apache_1_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "apache", "flame_sf": "apache_1"}),
             ("DC5", "minio_2_ep1", [], [], 7, 2998, 3610, 7, {"flame_location": "DC5", "flame_sfe": "minio_2_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "minio", "flame_sf": "minio_2"}),
-            ("DC3", "apache_1_ep1", [10, 15], [13, 9], 17.6, 1480, 7860, 64, {"flame_location": "DC5", "flame_sfe": "apache_1_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "apache", "flame_sf": "apache_1"}),
-            ("DC2", "minio_2_ep1", [11, 15], [13, 7.5], 7, 2998, 3610, 53, {"flame_location": "DC5", "flame_sfe": "minio_2_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "minio", "flame_sf": "minio_2"})
+            ("127.0.0.5", "minio_2_ep1", [0], [0], 7, 2998, 3610, 7, {"flame_location": "DC5", "flame_sfe": "minio_2_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "minio", "flame_sf": "minio_2"}),
+            ("DC3", "apache_1_ep1", [0, 9, 15, 0], [0, 15, 9, 0], 17.6, 1480, 7860, 64, {"flame_location": "DC5", "flame_sfe": "apache_1_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "apache", "flame_sf": "apache_1"}),
+            ("127.0.0.3", "apache_1_ep1", [9, 15, 0], [0, 15, 9], 17.6, 1480, 7860, 64, {"flame_location": "DC5", "flame_sfe": "apache_1_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "apache", "flame_sf": "apache_1"}),
+            ("ue3", "apache_1_ep1", [0, 9, 15, 0], [0, 15, 9, 0], 17.6, 1480, 7860, 64, {"flame_location": "DC5", "flame_sfe": "apache_1_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "apache", "flame_sf": "apache_1"}),
+            ("DC2", "minio_2_ep1", [0, 7.5, 15, 0], [0, 15, 7.5, 0], 7, 2998, 3610, 53, {"flame_location": "DC5", "flame_sfe": "minio_2_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "minio", "flame_sf": "minio_2"}),
+            ("127.0.0.2", "minio_2_ep1", [7.5, 15, 0], [0, 15, 7.5], 7, 2998, 3610, 53, {"flame_location": "DC5", "flame_sfe": "minio_2_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "minio", "flame_sf": "minio_2"}),
+            ("ue2", "minio_2_ep1", [0, 7.5, 15, 0], [0, 15, 7.5, 0], 7, 2998, 3610, 53, {"flame_location": "DC5", "flame_sfe": "minio_2_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "minio", "flame_sf": "minio_2"})
         ):
             request = testing.DummyRequest()
             request.matchdict["graph_id"] = request_id
             request.params["endpoint"] = endpoint
-            request.params["compute_node"] = dc
+            request.params["startpoint"] = startpoint
             response = GraphAPI(request).run_rtt_query()
             # approximation is used to avoid long float numbers retrieved from influx, the test case ensures the results are different enough so that approximation of +-1 is good enough for testing
             assert response.pop("request_size") == pytest.approx(request_size, 1), "Incorrect RTT response"
             assert response.pop("response_size") == pytest.approx(response_size, 1), "Incorrect RTT response"
             assert response.pop("round_trip_time") == pytest.approx(rtt, 1), "Incorrect RTT response"
             assert response == {"forward_latencies": forward_latencies, "reverse_latencies": reverse_latencies, "total_forward_latency": sum(forward_latencies), "total_reverse_latency": sum(reverse_latencies),
-                                "bandwidth": 104857600, "response_time": response_time, "global_tags": global_tags}, "Incorrect RTT response"
+                                "bandwidth": 104857600, "response_time": response_time, "local_tags": {"traffic_source": startpoint}, "global_tags": global_tags}, "Incorrect RTT response"
+
+    def test_delete_network_graph(self):
+        """
+        Tests the delete network graph functionality.
+        """
+
+        request = testing.DummyRequest()
+        response = GraphAPI(request).delete_network_topology()
+
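+        # NB: the expected counts below assume the network topology fixture created in conftest -
+        # 6 switches (127.0.0.1 to 127.0.0.6), 6 clusters (DC1 to DC6) and 3 UEs (ue2, ue3, ue6)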
+        assert response == {"deleted_switches_count": 6, "deleted_clusters_count": 6, "deleted_ues_count": 3}
+
+    @pytest.mark.parametrize("body, expected_error, error_msg", [
+        (None, "Configuration must be a JSON object.", "A bad request error must have been raised in case of missing request body."),
+        ('{"query_period": 45, "results_measurement_name": "graph"}', "Invalid JSON query document.", "A bad request error must have been raised in case of invalid request body."),
+        ('{"service_function_chain": "sfc", "service_function_chain_instance": "sfci", "query_period": 30, "results_measurement_name": "graph"}',
+         "Invalid JSON query document.", "A bad request error must have been raised in case of invalid request body."),
+        ('{"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"invalid_json"}}',
+         "Configuration must be a JSON object.", "A bad request error must have been raised in case of invalid JSON body."),
+        ('{"query_period": 45, "results_measurement_name": "graph", "service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": ["nginx", "minio"]}',
+         "The service functions description should be represented with a JSON object.", "A bad request error must have been raised in case of invalid request body."),
+        ('{"query_period": 45, "results_measurement_name": "graph", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         "Invalid JSON query document.", "A bad request error must have been raised in case of missing service function chain value in the request body"),
+        ('{"query_period": 45, "results_measurement_name": "graph", "service_function_chain": "sfc", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         "Invalid JSON query document.", "A bad request error must have been raised in case of missing sfci ID in the request body"),
+        ('{"query_period": "invalid", "results_measurement_name": "graph", "service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         "'query_period' parameter must be an integer", "A bad request error must have been raised in case of invalid query period parameter."),
+        ('{"query_period": -30, "results_measurement_name": "graph", "service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+        "'query_period' parameter must be a positive integer.", "A bad request error must have been raised in case of non-positive query period parameter."),
+        ('{"results_measurement_name": "graph", "service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         "Invalid JSON query document.", "A bad request error must have been raised in case of missing 'query period' parameter."),
+        ('{"query_period": 30, "service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         "Invalid JSON query document.", "A bad request error must have been raised in case of missing 'results_measurement_name' parameter."),
+        ('{"query_period": 45, "results_measurement_name": "graph", "service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         "Database for service function chain sfc not found.", "A bad request error must have been raised in case of a non-existing database."),
+    ])
+    def test_pipeline_error_handling(self, body, expected_error, error_msg):
+        """
+        Tests the handling of invalid requests for the execute pipeline graph API endpoint.
+
+        :param body: body of the request to test
+        :param expected_error: the expected error to be returned in the BadRequest response
+        :param error_msg: the error message to pass in case of an error not being properly handled by the API endpoint (in other words, a test failure)
+        """
+
+        request = testing.DummyRequest()
+        if body is not None:
+            request.body = body
+        request.body = request.body.encode(request.charset)
+
+        error_raised = False
+        try:
+            GraphAPI(request).execute_graph_pipeline()
+        except HTTPBadRequest as e:
+            print(e)
+            assert expected_error in str(e)
+            error_raised = True
+        assert error_raised, error_msg
+
+    @patch('clmcservice.graphapi.views.load')
+    @patch('clmcservice.graphapi.views.open')
+    @patch('clmcservice.graphapi.views.Popen')
+    @patch('clmcservice.graphapi.views.uuid4')
+    def test_execute_graph_pipeline(self, uuid_mock, popen_mock, fileopen_mock, jsonload_mock):
+        """
+        Tests the functionality to start a pipeline script executing the graph API workflow - build, query, delete.
+
+        :param uuid_mock: mock object for the uuid generator function
+        :param popen_mock: mock object for the process creation function
+        :param fileopen_mock: mock object to mimic the behaviour of opening a file
+        :param jsonload_mock: mock object to mimic the behaviour of the JSON load function
+        """
+
+        # mock the behaviour of the uuid4 function
+        uuid_mock.return_value = "monitor_test_uuid1"
+
+        # mock the behaviour of the Popen class
+        pid_property_mock = PropertyMock(return_value=111)
+        returncode_property_mock = PropertyMock(return_value=None)
+        popen_instance_mock = Mock()
+        type(popen_instance_mock).pid = pid_property_mock  # a property mock cannot be attached directly to the mock object, hence use its type object
+        type(popen_instance_mock).returncode = returncode_property_mock  # a property mock cannot be attached directly to the mock object, hence use its type object
+        popen_mock.return_value = popen_instance_mock
+
+        # mock the behaviour of the open() and load() functions
+        fileopen_mock.return_value = MagicMock()  # a magic mock is needed so that the dunder methods __enter__ and __exit__ are generated
+        ues_dict = {"127.0.0.1": "ue1", "127.0.0.2": "ue2", "127.0.0.3": "ue3"}
+        jsonload_mock.return_value = ues_dict
+
+        # check proper behaviour
+        service_functions = dict(nginx={"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)",
+                                        "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"},
+                                 minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
+                                        "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"})
+        monitor_json_body = dict(service_function_chain="test_sfc", service_function_chain_instance="test_sfc_premium", service_functions=service_functions)
+        monitor_json_body["query_period"] = 30
+        monitor_json_body["results_measurement_name"] = "graph_measurements"
+        body = dumps(monitor_json_body)
+        request = testing.DummyRequest()
+        request.body = body.encode(request.charset)
+        response = GraphAPI(request).execute_graph_pipeline()
+        assert response == {"uuid": uuid_mock.return_value, "database": "test_sfc"}
+
+        monitor_json_body["ues"] = list(ues_dict.values())
+
+        # assert that the graph pipeline script is run with the JSON config that was received in the request along with the UEs
+        actual_call_arguments = popen_mock.call_args[0][0]  # we expect exactly one call to Popen() with one argument which is a list
+        assert actual_call_arguments[0] == "graph-pipeline.sh", "Incorrect graph pipeline script name"
+        assert loads(actual_call_arguments[1]) == monitor_json_body, "Incorrect JSON configuration passed to pipeline script"
+        pid_property_mock.assert_called_once_with()  # assert that the process ID attribute was called and saved
+        returncode_property_mock.assert_called_once_with()  # assert that the process return code attribute was called to check if the process has started successfully
+
+        # check that the process ID was saved
+        assert MonitoringProcess.exists(uuid_mock.return_value), "Request identifier was not saved during the request processing"
+        assert MonitoringProcess.get(uuid_mock.return_value) == pid_property_mock.return_value, "Incorrect PID was saved during the request processing"
+
+        # check erroneous behaviour - process returns a value immediately
+        returncode_property_mock.return_value = -1
+        request = testing.DummyRequest()
+        request.body = body.encode(request.charset)
+        error_raised = False
+        try:
+            GraphAPI(request).execute_graph_pipeline()
+        except HTTPInternalServerError:
+            error_raised = True
+        assert error_raised, "Expecting a 500 HTTP error if the process terminated immediately after it was started"
+
+        # assert that the graph pipeline script is run with the JSON config that was received in the request along with the UEs
+        actual_call_arguments = popen_mock.call_args[0][0]  # we expect exactly one call to Popen() with one argument which is a list
+        assert actual_call_arguments[0] == "graph-pipeline.sh", "Incorrect graph pipeline script name"
+        assert loads(actual_call_arguments[1]) == monitor_json_body, "Incorrect JSON configuration passed to pipeline script"
+        pid_property_mock.assert_called_with()  # assert that the process ID attribute was called and saved
+        returncode_property_mock.assert_called_with()  # assert that the process return code attribute was called to check if the process has started successfully
+
+    @patch('clmcservice.graphapi.views.kill')
+    def test_stop_graph_pipeline(self, mock_kill):
+        """
+        Tests the functionality to stop a graph monitoring script.
+
+        :param mock_kill: mock object to mimic the behaviour of the os.kill function
+        """
+
+        # mock a monitoring process
+        pid = 111
+        reqid = "test_request_id"
+        MonitoringProcess.add({"request_id": reqid, "process_id": pid})
+
+        # test behaviour with a non-existing request UUID
+        request = testing.DummyRequest()
+        request.matchdict["request_id"] = "unknown-request-uuid"
+        error_raised = False
+        try:
+            GraphAPI(request).stop_graph_pipeline()
+        except HTTPNotFound:
+            error_raised = True
+        assert error_raised, "Error must have been raised for unrecognised request UUID."
+
+        # test the behaviour when the PID doesn't exist or another OSError is thrown
+        mock_kill.side_effect = OSError("error")
+        request = testing.DummyRequest()
+        request.matchdict["request_id"] = reqid
+        response = GraphAPI(request).stop_graph_pipeline()
+        assert response == {"msg": "Monitoring process has been stopped before this request was executed."}
+
+        # test behaviour with existing request UUID and existing PID
+        MonitoringProcess.add({"request_id": reqid, "process_id": pid})
+        assert MonitoringProcess.exists(reqid)
+        mock_kill.side_effect = None
+        request = testing.DummyRequest()
+        request.matchdict["request_id"] = reqid
+        response = GraphAPI(request).stop_graph_pipeline()
+        assert response == {"msg": "Monitoring process has been successfully stopped."}
+        mock_kill.assert_called_with(pid, SIGKILL)  # assert that os.kill was called with the SIGKILL signal
+        assert not MonitoringProcess.exists(reqid), "Request ID must be removed when the process is killed."
 
     @staticmethod
     def check_exist_relationship(relationships_tuple, graph, uuid):
@@ -453,3 +659,40 @@ class TestGraphAPI(object):
-            assert to_node is not None  # IMPORTANT, assert the from_node exists, otherwise the py2neo RelationshipMatcher object assumes you are looking for any node (instead of raising an error)
+            assert to_node is not None  # IMPORTANT, assert the to_node exists, otherwise the py2neo RelationshipMatcher object assumes you are looking for any node (instead of raising an error)
 
             assert graph.relationships.match(nodes=(from_node, to_node), r_type=relationship_type).first() is not None, "Graph is missing a required relationship"
+
+
+def graph_generator(from_timestamp, to_timestamp):
+    """
+    Utility function (generator) used to send 2 valid graph build requests to the graph API - yields the response of each.
+
+    :param from_timestamp: 'from' timestamp
+    :param to_timestamp: 'to' timestamp
+    """
+
+    service_functions = dict(nginx={"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)",
+                                    "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"},
+                             minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
+                                    "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"})
+    build_json_body = dict(service_function_chain="test_sfc", service_function_chain_instance="test_sfc_premium", service_functions=service_functions)
+    build_json_body["from"] = from_timestamp
+    build_json_body["to"] = to_timestamp
+    body = dumps(build_json_body)
+    request = testing.DummyRequest()
+    request.body = body.encode(request.charset)
+    response = GraphAPI(request).build_temporal_graph()
+
+    yield response
+
+    service_functions = dict(minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
+                                    "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"},
+                             apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)",
+                                     "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"})
+    build_json_body = dict(service_function_chain="test_sfc", service_function_chain_instance="test_sfc_non_premium", service_functions=service_functions)
+    build_json_body["from"] = from_timestamp
+    build_json_body["to"] = to_timestamp
+    body = dumps(build_json_body)
+    request = testing.DummyRequest()
+    request.body = body.encode(request.charset)
+    response = GraphAPI(request).build_temporal_graph()
+
+    yield response
diff --git a/src/service/clmcservice/graphapi/utilities.py b/src/service/clmcservice/graphapi/utilities.py
index 18efc199408c727c9bdbb42c81de68da67214934..10b0a311c89e761d0abaa03059d5af9a98ded710 100644
--- a/src/service/clmcservice/graphapi/utilities.py
+++ b/src/service/clmcservice/graphapi/utilities.py
@@ -24,37 +24,48 @@
 
 from json import loads
 from py2neo import Node, Relationship
-import logging
+from logging import getLogger
 
 
-GRAPH_ROUND_TRIP_TIME_URL_PARAMS = ("compute_node", "endpoint")
+GRAPH_ROUND_TRIP_TIME_URL_PARAMS = ("startpoint", "endpoint")
 
-GRAPH_BUILD_URL_PARAMS = ("from", "to")
-GRAPH_BUILD_QUERY_PARAMS = {"service_function_chain", "service_function_chain_instance", "service_functions"}
+GRAPH_MONITOR_QUERY_PARAMS = {"query_period", "results_measurement_name", "service_function_chain", "service_function_chain_instance", "service_functions"}
+
+GRAPH_BUILD_QUERY_PARAMS = {"from", "to", "service_function_chain", "service_function_chain_instance", "service_functions"}
 GRAPH_BUILD_SF_QUERY_PARAMS = {"response_time_field", "request_size_field", "response_size_field", "measurement_name"}
 
-INFLUX_QUERY_TEMPLATE = 'SELECT {0} AS mean_response_time, {1} AS mean_request_size, {2} AS mean_response_size FROM "{3}"."{4}".{5} WHERE "flame_sfc"=\'{6}\' and "flame_sfci"=\'{7}\' and time>={8} and time<{9} GROUP BY "flame_sfe", "flame_location", "flame_sf"'
+INFLUX_QUERY_TEMPLATE = 'SELECT {0} AS mean_response_time, {1} AS mean_request_size, {2} AS mean_response_size FROM "{3}"."{4}".{5} WHERE "flame_sfc"=\'{6}\' and "flame_sfci"=\'{7}\' and "flame_sfp"=\'{8}\' and time>={9} and time<{10} GROUP BY "flame_sfe", "flame_location", "flame_sf"'
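+# The template placeholders are, in order: the response time, request size and response size field expressions,
+# the database, retention policy and measurement names, the sfc, sfci and sfp tag values, and the from/to timestamps.
+# E.g. (illustrative) formatting it for the 'nginx' package of chain 'test_sfc' produces a query that averages the
+# response time and request/response sizes per endpoint, location and service function over the given time range.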
 
 
+# in Cypher the syntax is {name: 'value'}; here we use {{name: 'value'}} to escape the curly braces when applying the Python format function
 RTT_CYPHER_QUERY_TEMPLATE = """
-MATCH (dc:ComputeNode {{ name: '{0}' }}),(endpoint:Endpoint {{ name: '{1}', uuid: '{2}'}}), 
-path = shortestPath((dc)-[*]-(endpoint))
+MATCH (startpoint:{0} {{ name: '{1}' }}),(endpoint:Endpoint {{ name: '{2}', uuid: '{3}'}}), 
+path = shortestPath((startpoint)-[*]-(endpoint))
 WHERE ALL(r IN relationships(path) WHERE type(r)='linkedTo' or type(r)='hostedBy' )
-WITH nodes(path) as all_nodes, endpoint as endpoint
-    WITH all_nodes[0..size(all_nodes)-1] as network_nodes, endpoint as endpoint
-    UNWIND RANGE(0, size(network_nodes) - 2) as id
-    WITH network_nodes[id] as source, network_nodes[id+1] as target, endpoint.response_time as response_time, endpoint.request_size as request_size, endpoint.response_size as response_size
-        MATCH (source) -[r1]-> (target), (target) -[r2]-> (source)
-        RETURN collect(r1.latency) as forward_latencies, reverse(collect(r2.latency)) as reverse_latencies, response_time, request_size, response_size
+WITH extract(y in filter(x in relationships(path) WHERE type(x) = 'linkedTo') | y.latency) as latencies, endpoint.response_time as response_time, endpoint.request_size as request_size, endpoint.response_size as response_size
+RETURN latencies  as forward_latencies, reverse(latencies) as reverse_latencies, response_time, request_size, response_size
 """
 
+# DEPRECATED QUERY - use this if we have to switch back to using two directed edges between a given pair of nodes
+# RTT_CYPHER_QUERY_TEMPLATE = """
+# MATCH (dc:Cluster {{ name: '{0}' }}),(endpoint:Endpoint {{ name: '{1}', uuid: '{2}'}}),
+# path = shortestPath((dc)-[*]-(endpoint))
+# WHERE ALL(r IN relationships(path) WHERE type(r)='linkedTo' or type(r)='hostedBy' )
+# WITH nodes(path) as all_nodes, endpoint as endpoint
+#     WITH all_nodes[0..size(all_nodes)-1] as network_nodes, endpoint as endpoint
+#     UNWIND RANGE(0, size(network_nodes) - 2) as id
+#     WITH network_nodes[id] as source, network_nodes[id+1] as target, endpoint.response_time as response_time, endpoint.request_size as request_size, endpoint.response_size as response_size
+#         MATCH (source) -[r1]-> (target), (target) -[r2]-> (source)
+#         RETURN collect(r1.latency) as forward_latencies, reverse(collect(r2.latency)) as reverse_latencies, response_time, request_size, response_size
+# """
+
 
-log = logging.getLogger('service_logger')
+log = getLogger('service_logger')
 
 
-def validate_json_queries_body(body):
+def validate_build_request_body(body):
     """
-    Validates the request body containing mappings from service functions to queries to execute.
+    Validates the request body, with mappings from service functions to queries to execute, given in a request to build a temporal graph.
 
     :param body: the request body to validate
 
@@ -63,11 +74,11 @@ def validate_json_queries_body(body):
     :raise AssertionError: if the body is invalid
     """
 
-    global GRAPH_BUILD_QUERY_PARAMS
+    global GRAPH_BUILD_QUERY_PARAMS, GRAPH_BUILD_SF_QUERY_PARAMS
 
     try:
         body = loads(body)
-    except:
+    except Exception:
         raise AssertionError("Configuration must be a JSON object.")
 
     assert GRAPH_BUILD_QUERY_PARAMS == set(body.keys()), "Invalid JSON query document."
@@ -83,40 +94,52 @@ def validate_json_queries_body(body):
     # except ValueError:
     #     assert False, "Incorrect format of service function chain instance ID - use format <sfcID>_<instanceNum>"
 
-    assert type(body["service_functions"]) == dict, "The service function description should be represented with a dictionary."
+    assert type(body["service_functions"]) == dict, "The service functions description should be represented with a JSON object."
 
     for sf in body["service_functions"]:
         query_data = body["service_functions"][sf]
         assert type(query_data) == dict, "Each service function must be associated with a respective JSON object."
         assert GRAPH_BUILD_SF_QUERY_PARAMS == set(query_data.keys()), "Invalid query data for service function {0} in the JSON query document".format(sf)
 
+    assert type(body["from"]) == int, "'from' parameter must be a timestamp integer"
+    assert body["from"] >= 0, "'from' parameter must be a positive timestamp integer or 0"
+    assert type(body["to"]) == int, "'to' parameter must be a timestamp integer"
+    assert body["to"] > body["from"], "'to' parameter timestamp must be greater than 'from' parameter timestamp"
+
     return body
 
 
-def validate_graph_url_params(params):
+def validate_monitor_request_body(body):
     """
-    Validates the request url parameters used in building a temporal graph.
+    Validates the request body, with mappings from service functions to queries to execute, given in a request to monitor graph metrics.
 
-    :param params: the parameters dictionary to validate
-    :return: the validated parameters
-    :raise AssertionError: for invalid parameters
-    """
+    :param body: the request body to validate
 
-    global GRAPH_BUILD_URL_PARAMS
+    :return: the validated JSON queries dictionary object
 
-    url_params = {}
-    for param in GRAPH_BUILD_URL_PARAMS:
-        assert param in params, "Incorrect url parameters - required url query parameter '{0}' is not found in the request parameters.".format(param)
-        url_params[param] = params[param]
+    :raise AssertionError: if the body is invalid
+    """
+
+    global GRAPH_MONITOR_QUERY_PARAMS, GRAPH_BUILD_SF_QUERY_PARAMS
 
     try:
-        # convert timestamps to integers
-        url_params['from'] = int(url_params['from'])
-        url_params['to'] = int(url_params['to'])
-    except ValueError:
-        assert False, "Invalid URL timestamp parameters"
+        body = loads(body)
+    except Exception:
+        raise AssertionError("Configuration must be a JSON object.")
 
-    return url_params
+    assert GRAPH_MONITOR_QUERY_PARAMS == set(body.keys()), "Invalid JSON query document."
+
+    assert type(body["service_functions"]) == dict, "The service functions description should be represented with a JSON object."
+
+    for sf in body["service_functions"]:
+        query_data = body["service_functions"][sf]
+        assert type(query_data) == dict, "Each service function must be associated with a respective JSON object."
+        assert GRAPH_BUILD_SF_QUERY_PARAMS == set(query_data.keys()), "Invalid query data for service function {0} in the JSON query document".format(sf)
+
+    assert type(body["query_period"]) == int, "'query_period' parameter must be an integer"
+    assert body["query_period"] > 0, "'query_period' parameter must be a positive integer."
+
+    return body
 
 
 def validate_graph_rtt_params(params):
@@ -138,12 +161,13 @@ def validate_graph_rtt_params(params):
     return url_params
 
 
-def find_or_create_node(graph, node_type, **properties):
+def find_or_create_node(graph, node_type, return_created=False, **properties):
     """
     This function checks if a node of the given type with the given properties exists, and if not - creates it.
 
     :param graph: the graph object
     :param node_type: the type of the node to find or create
+    :param return_created: if True, the result will contain both the node and a boolean flag indicating whether the node was newly created
     :param properties: the properties of the node to find or create
     :return: the found or newly created node object
     """
@@ -153,12 +177,17 @@ def find_or_create_node(graph, node_type, **properties):
     else:
         node = graph.nodes.match(node_type, name=properties['name']).first()
 
+    created = False
     if node is None:
         log.info("Creating node of type {0} with properties {1}".format(node_type, properties))
         node = Node(node_type, **properties)
         graph.create(node)
+        created = True
 
-    return node
+    if return_created:
+        return node, created
+    else:
+        return node
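+
+# Example usage of the return_created flag (illustrative):
+#   node, created = find_or_create_node(graph, "Switch", return_created=True, name="127.0.0.1")
+# 'created' is True only when the node did not exist before the call - build_network_graph below relies
+# on this to count the newly inserted switches, clusters and UEs.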
 
 
 def find_or_create_edge(graph, edge_type, from_node, to_node, **properties):
@@ -182,7 +211,7 @@ def find_or_create_edge(graph, edge_type, from_node, to_node, **properties):
     return edge
 
 
-def build_temporal_graph(request_id, from_timestamp, to_timestamp, json_queries, graph, influx_client):
+def build_temporal_subgraph(request_id, from_timestamp, to_timestamp, json_queries, graph, influx_client):
     """
-    A function used to generate a temporal graph in the neo4j db.
+    A function used to generate a temporal subgraph in the neo4j db.
 
@@ -192,6 +221,8 @@ def build_temporal_graph(request_id, from_timestamp, to_timestamp, json_queries,
     :param json_queries: the JSON object containing the query data for each service function
     :param graph: the graph DB object
     :param influx_client: the influx DB client object
+
+    :return: the list of names of the endpoint nodes related to the new temporal graph
     """
 
     global INFLUX_QUERY_TEMPLATE
@@ -214,11 +245,11 @@ def build_temporal_graph(request_id, from_timestamp, to_timestamp, json_queries,
-    # create a instanceOf edge if it doesn't exist
+    # create an instanceOf edge if it doesn't exist
     find_or_create_edge(graph, "instanceOf", service_function_chain_instance_node, service_function_chain_node)
 
-    compute_nodes = set()  # a set is used to keep track of all compute nodes that are found while building the graph, which is then used to retrieve the network latencies
+    endpoints_names = set()  # keep track of the names of the endpoint nodes that are created
 
     # traverse the list of service functions
-    for service_function in json_queries["service_functions"]:
-        query_data = json_queries["service_functions"][service_function]
+    for service_function_package in json_queries["service_functions"]:
+        query_data = json_queries["service_functions"][service_function_package]
 
         response_time_field = query_data["response_time_field"]
         request_size_field = query_data["request_size_field"]
@@ -226,49 +257,51 @@ def build_temporal_graph(request_id, from_timestamp, to_timestamp, json_queries,
         measurement = query_data["measurement_name"]
 
         # build up the query by setting the placeholders in the query template
-        query_to_execute = INFLUX_QUERY_TEMPLATE.format(response_time_field, request_size_field, response_size_field, db, rp, measurement, sfc, sfci, from_timestamp, to_timestamp)
-
-        # create a node for the service function if it doesn't exist
-        service_function_package_node = find_or_create_node(graph, "ServiceFunctionPackage", name=service_function)
-        # crate a utilizedBy edge between the service function and the service function chain
-        find_or_create_edge(graph, "utilizedBy", service_function_package_node, service_function_chain_node)
+        query_to_execute = INFLUX_QUERY_TEMPLATE.format(response_time_field, request_size_field, response_size_field, db, rp, measurement, sfc, sfci, service_function_package, from_timestamp, to_timestamp)
 
         log.info("Executing query: {0}".format(query_to_execute))
         result = influx_client.query(query_to_execute)  # execute the query
-
-        # iterate through each result item
-        for item in result.items():
-            metadata, result_points = item  # each result item is a tuple of two elements
-
-            # metadata consists of the result tags and the measurement name
-            # measurement = metadata[0]
-            tags = metadata[1]
-
-            result_point = next(result_points)  # get the result point dictionary
-            response_time = result_point["mean_response_time"]  # extract the response time of the SF from the result
-            request_size = result_point["mean_request_size"]  # extract the avg request size of the SF from the result
-            response_size = result_point["mean_response_size"]  # extract the avg response size of the SF from the result
-
-            # create a ServiceFunction node from the tag value (if it is not already created)
-            service_function_node = find_or_create_node(graph, "ServiceFunction", name=tags["flame_sf"])
-            # create an edge between the the service function and the package (if it is not already created)
-            find_or_create_edge(graph, "instanceOf", service_function_node, service_function_package_node)
-            # crate a utilizedBy edge between the service function and the service function chain instance
-            find_or_create_edge(graph, "utilizedBy", service_function_node, service_function_chain_instance_node)
-
-            # create an Endpoint node from the tag value (if it is not already created)
-            ipendpoint_node = find_or_create_node(graph, "Endpoint", name=tags["flame_sfe"], response_time=response_time, request_size=request_size, response_size=response_size, uuid=request_id)
-            # create an edge between the service function and the endpoint (if it is not already created)
-            find_or_create_edge(graph, "realisedBy", service_function_node, ipendpoint_node)
-
-            # create a ComputeNode node from the tag value (if it is not already created)
-            compute_node = find_or_create_node(graph, "ComputeNode", name=tags["flame_location"])
-            # create an edge between the endpoint and the compute node (if it is not already created)
-            find_or_create_edge(graph, "hostedBy", ipendpoint_node, compute_node)
-
-            compute_nodes.add(compute_node)  # add the compute node to the set of compute nodes
+        result_items = result.items()
+
+        if len(result_items) > 0:
+            # create a node for the service function if it doesn't exist
+            service_function_package_node = find_or_create_node(graph, "ServiceFunctionPackage", name=service_function_package)
+            # create a utilizedBy edge between the service function package and the service function chain
+            find_or_create_edge(graph, "utilizedBy", service_function_package_node, service_function_chain_node)
+
+            # iterate through each result item
+            for item in result_items:
+                metadata, result_points = item  # each result item is a tuple of two elements
+
+                # metadata consists of the result tags and the measurement name
+                # measurement = metadata[0]
+                tags = metadata[1]
+
+                result_point = next(result_points)  # get the result point dictionary
+                response_time = result_point["mean_response_time"]  # extract the response time of the SF from the result
+                request_size = result_point["mean_request_size"]  # extract the avg request size of the SF from the result
+                response_size = result_point["mean_response_size"]  # extract the avg response size of the SF from the result
+
+                # create a ServiceFunction node from the tag value (if it is not already created)
+                service_function_node = find_or_create_node(graph, "ServiceFunction", name=tags["flame_sf"])
+                # create an edge between the service function and the package (if it is not already created)
+                find_or_create_edge(graph, "instanceOf", service_function_node, service_function_package_node)
+                # create a utilizedBy edge between the service function and the service function chain instance
+                find_or_create_edge(graph, "utilizedBy", service_function_node, service_function_chain_instance_node)
+
+                # create an Endpoint node from the tag value (if it is not already created)
+                ipendpoint_node = find_or_create_node(graph, "Endpoint", name=tags["flame_sfe"], response_time=response_time, request_size=request_size, response_size=response_size, uuid=request_id)
+                # create an edge between the service function and the endpoint (if it is not already created)
+                find_or_create_edge(graph, "realisedBy", service_function_node, ipendpoint_node)
+                endpoints_names.add(tags["flame_sfe"])
+
+                # create a Cluster node from the tag value (if it is not already created)
+                compute_node = find_or_create_node(graph, "Cluster", name=tags["flame_location"])
+                # create a hostedBy edge between the endpoint and the cluster node (if it is not already created)
+                find_or_create_edge(graph, "hostedBy", ipendpoint_node, compute_node)
 
     log.info("Finished building graph for service function chain {0} from database {1} with retention policy {2}".format(sfci, db, rp))
+    return list(endpoints_names)
 
 
 def delete_temporal_subgraph(graph, subgraph_id):
@@ -292,3 +325,180 @@ def delete_temporal_subgraph(graph, subgraph_id):
     log.info("Deleted {0} nodes associated with ID {1}".format(nodes_matched, subgraph_id))
 
     return nodes_matched
+
+
+def build_network_graph(graph, switches, links, clusters, ues):
+    """
+    A function used to build the network topology in the neo4j graph given the collections of switches, links, clusters and UEs.
+
+    :param graph: the neo4j graph database client
+    :param switches: a collection of all switches in the topology - mapping between the DPID of the switch and its IP address
+    :param links: a collection of all switch-to-switch links in the network topology - JSON format, list of objects, each object must have "src-switch", "dst-switch" and "latency" as keys
+    :param clusters: a collection of all clusters and the IP address of the service router that they are connected to - mapping between an IP address of a service router and a cluster identifier
+    :param ues: a collection of all UEs and the IP address of the service router that they are connected to - mapping between an IP address of a service router and a UE identifier
+    """
+
+    new_switches_count = 0
+    new_clusters_count = 0
+    new_ues_count = 0
+
+    for link in links:
+        # get the DPID of the source switch
+        source = link["src-switch"]
+        # get the IP address of the source switch
+        source = switches[source]
+
+        # get the DPID of the destination switch
+        destination = link["dst-switch"]
+        # get the IP address of the destination switch
+        destination = switches[destination]
+
+        # retrieve the latency for this link
+        latency = link["latency"] / 1000  # convert to seconds
+
+        # create or retrieve the from node
+        from_node, created = find_or_create_node(graph, "Switch", return_created=True, name=source)
+        if created:
+            new_switches_count += 1
+
+        # create or retrieve the to node
+        to_node, created = find_or_create_node(graph, "Switch", return_created=True, name=destination)
+        if created:
+            new_switches_count += 1
+
+        # create the link between the two nodes
+        find_or_create_edge(graph, "linkedTo", from_node, to_node, latency=latency)
+
+        # check whether the source service router connects a particular cluster or a particular UE
+        if create_node_from_mapping(graph, from_node, source, clusters, "Cluster"):
+            new_clusters_count += 1
+        if create_node_from_mapping(graph, from_node, source, ues, "UserEquipment"):
+            new_ues_count += 1
+
+        # check whether the destination service router connects a particular cluster or a particular UE
+        if create_node_from_mapping(graph, to_node, destination, clusters, "Cluster"):
+            new_clusters_count += 1
+        if create_node_from_mapping(graph, to_node, destination, ues, "UserEquipment"):
+            new_ues_count += 1
+
+    return new_switches_count, new_clusters_count, new_ues_count
+
+
+def create_node_from_mapping(graph, node, node_ip, mapping, new_node_type):
+    """
+    Creates an additional node of a given type if a mapping from a switch node is found.
+
+    :param graph: the neo4j graph database client
+    :param node: the original node
+    :param node_ip: the original node's IP address
+    :param mapping: the mapping object (dictionary from IP address to identifier)
+    :param new_node_type: the type of the new node to be created
+
+    :return: True if new node was created and False otherwise
+    """
+
+    if node_ip in mapping:
+        new_node_name = mapping[node_ip]
+        new_node, created = find_or_create_node(graph, new_node_type, return_created=True, name=new_node_name)
+        find_or_create_edge(graph, "linkedTo", new_node, node, latency=0)
+        return created
+
+    return False
+
+
+def delete_network_graph(graph):
+    """
+    A function used to delete all nodes of type Switch, Cluster and UserEquipment in the neo4j graph.
+
+    :param graph: the neo4j graph
+    :return: the number of deleted switches, clusters and UEs
+    """
+
+    log.info("Deleting Switch nodes.".format())
+
+    subgraph = graph.nodes.match("Switch")
+    deleted_switches = len(subgraph)
+    for node in subgraph:
+        graph.delete(node)
+
+    log.info("Deleted {0} Switch nodes.".format(deleted_switches))
+
+    log.info("Deleting Cluster nodes.")
+
+    subgraph = graph.nodes.match("Cluster")
+    deleted_clusters = len(subgraph)
+    for node in subgraph:
+        graph.delete(node)
+
+    log.info("Deleted {0} Cluster nodes.".format(deleted_clusters))
+
+    log.info("Deleting UserEquipment nodes.")
+
+    subgraph = graph.nodes.match("UserEquipment")
+    deleted_ues = len(subgraph)
+    for node in subgraph:
+        graph.delete(node)
+
+    log.info("Deleted {0} UserEquipment nodes.".format(deleted_clusters))
+
+    return deleted_switches, deleted_clusters, deleted_ues
+
+
+def find_node_with_possible_types(name, possible_types, graph):
+    """
+    Finds a node that matches one of the given possible types based on the given name.
+
+    :param name: the name of the node
+    :param possible_types: a list of strings defining the possible types for a node.
+    :param graph: the neo4j graph reference
+
+    :return: the node object and its type, or None and None if no match is found
+    """
+
+    for type_ in possible_types:
+        node = graph.nodes.match(type_, name=name).first()
+        if node is not None:
+            return node, type_
+
+    return None, None
+
+
+def depth_first_search(graph, root_node):
+    """
+    A generator that performs a depth-first search through the graph, starting from a root node and stopping when it reaches an Endpoint node.
+
+    :param graph: the graph db client
+    :param root_node: the root node reference (e.g. a ServiceFunctionChain node)
+
+    :return: a sequence of the traversed nodes, yielded in reverse order of visiting (each node is yielded before the node it was discovered from)
+    """
+
+    # a separate stack used to store the nodes in post-order
+    post_order = []
+
+    # a map between the type of a node and the type of edges that are going to be searched - e.g. for nodes of type ServiceFunctionChain, look for utilizedBy and instanceOf edges
+    node_edges_labels = {"ServiceFunctionChain": ("utilizedBy", "instanceOf"), "ServiceFunctionChainInstance": ("utilizedBy",), "ServiceFunction": ("realisedBy",)}
+
+    # the stack holds the nodes that are yet to be visited
+    stack = [root_node]
+
+    log.info("Performing DFS starting from node {0}".format(root_node["name"]))
+    # simple iterative depth-first search using a stack
+    while len(stack) > 0:
+        current_node = stack.pop()
+        post_order.append(current_node)
+
+        current_node_type = list(current_node.labels)[0]  # a node might have multiple labels in Neo4j, but in our scenario we expect exactly one label
+        for edge_label in node_edges_labels.get(current_node_type, []):  # for each possible edge label of the given node type, or empty list by default value
+            for relationship in graph.match({current_node, }, r_type=edge_label):  # we use a set because we do not care for direction
+                # the direction of the relationship is ambiguous so check which end of it we need to append
+                if relationship.end_node == current_node:
+                    stack.append(relationship.start_node)
+                else:
+                    stack.append(relationship.end_node)
+
+    log.info("Performing post order iteration after DFS is finished.")
+    # yield the elements in post order
+    while len(post_order) > 0:
+        node = post_order.pop()
+        yield node
diff --git a/src/service/clmcservice/graphapi/views.py b/src/service/clmcservice/graphapi/views.py
index 0223aea5b0d7b54967d10ad91832be8592ad10f4..582b731fee4cfbf215f0386a688cc470f34641d9 100644
--- a/src/service/clmcservice/graphapi/views.py
+++ b/src/service/clmcservice/graphapi/views.py
@@ -23,16 +23,23 @@
 """
 
 
-from clmcservice.graphapi.utilities import validate_json_queries_body, validate_graph_url_params, build_temporal_graph, delete_temporal_subgraph, validate_graph_rtt_params, RTT_CYPHER_QUERY_TEMPLATE
-from uuid import uuid4
+from clmcservice.graphapi.utilities import validate_build_request_body, validate_monitor_request_body, RTT_CYPHER_QUERY_TEMPLATE, \
+    build_network_graph, delete_network_graph, build_temporal_subgraph, delete_temporal_subgraph, validate_graph_rtt_params, find_node_with_possible_types, depth_first_search
+from clmcservice.models import MonitoringProcess
 from influxdb import InfluxDBClient
 from py2neo import Graph
-from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound
+from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound, HTTPServiceUnavailable, HTTPNotImplemented, HTTPInternalServerError
 from pyramid.view import view_defaults, view_config
-import logging
+from requests import exceptions, get
+from uuid import uuid4
+from json import load, dumps
+from subprocess import Popen
+from os import kill
+from signal import SIGKILL
+from logging import getLogger
 
 
-log = logging.getLogger('service_logger')
+log = getLogger('service_logger')
 
 
 @view_defaults(renderer='json')
@@ -54,23 +61,18 @@ class GraphAPI(object):
     def build_temporal_graph(self):
         """
         An API endpoint to build a temporal graph and store it in neo4j based on the posted JSON query document.
-        The request parameters must also include URL query parameters defining the time range for which the graph must be generated.
 
-        :raises HTTPBadRequest: if request body is not a valid JSON with the queries per service function or if request URL doesn't contain the required URL query parameters
-        :return: A JSON document containing the posted request body, along with meta data about the built graph (time range and uuid, which can then be reused for other API calls)
+        :raises HTTPBadRequest: if request body is not a valid JSON with the queries per service function
+
+        :return: A JSON document containing metadata about the built graph (time range and uuid, which can then be reused for other API calls)
         """
 
         try:
             body = self.request.body.decode(self.request.charset)
-            json_queries = validate_json_queries_body(body)  # validate the content and receive a json dictionary object
+            json_queries = validate_build_request_body(body)  # validate the content and receive a json dictionary object
         except AssertionError as e:
             raise HTTPBadRequest("Bad request content: {0}".format(e.args))
 
-        try:
-            params = validate_graph_url_params(self.request.params)
-        except AssertionError as e:
-            raise HTTPBadRequest("Request URL format is incorrect: {0}".format(e.args))
-
         graph = Graph(host=self.request.registry.settings['neo4j_host'], password=self.request.registry.settings['neo4j_password'])
         influx_client = InfluxDBClient(host=self.request.registry.settings['influx_host'], port=self.request.registry.settings['influx_port'], timeout=10)
 
@@ -78,21 +80,24 @@ class GraphAPI(object):
         if database_name not in [db["name"] for db in influx_client.get_list_database()]:
             raise HTTPBadRequest("Database for service function chain {0} not found.".format(database_name))
 
-        from_timestamp = params['from'] * 10**9
-        to_timestamp = params['to'] * 10**9
+        from_timestamp = json_queries['from'] * 10**9
+        to_timestamp = json_queries['to'] * 10**9
 
         request_id = str(uuid4())
 
-        build_temporal_graph(request_id, from_timestamp, to_timestamp, json_queries, graph, influx_client)
-        json_queries['graph'] = {"uuid": request_id, "time_range": {"from": from_timestamp, "to": to_timestamp}}
-        return json_queries
+        endpoints_names = build_temporal_subgraph(request_id, from_timestamp, to_timestamp, json_queries, graph, influx_client)
 
-    @view_config(route_name='graph_manage', request_method='DELETE')
+        json_response = {"database": database_name, 'graph': {"uuid": request_id, "endpoints": endpoints_names, "time_range": {"from": from_timestamp, "to": to_timestamp}}}
+
+        return json_response
+
+    @view_config(route_name='temporal_graph_manage', request_method='DELETE')
     def delete_temporal_graph(self):
         """
         An API endpoint to delete a temporal graph associated with a uuid generated by the CLMC service.
 
-        :return: A JSON document containing the UUID of the deleted subgraph
+        :return: A JSON document containing the number of deleted nodes.
+
         :raises HTTPNotFound: if the request is not associated with any subgraph
         """
 
@@ -103,7 +108,38 @@ class GraphAPI(object):
             raise HTTPNotFound("No subgraph found associated with the request ID {0}".format(graph_id))
 
         number_of_deleted_nodes = delete_temporal_subgraph(graph, graph_id)
-        return {"uuid": graph_id, "deleted": number_of_deleted_nodes}
+        return {"deleted": number_of_deleted_nodes}
+
+    @view_config(route_name='full_graph_manage', request_method='DELETE')
+    def delete_full_graph(self):
+        """
+        An API endpoint to delete a media service graph with a given SFC identifier.
+
+        :return: A JSON document containing the number of deleted nodes.
+
+        :raises HTTPNotFound: if there is no SFC node with the given identifier
+        """
+
+        sfc_id = self.request.matchdict['sfc_id']  # get the SFC identifier from the URL
+        graph = Graph(host=self.request.registry.settings['neo4j_host'], password=self.request.registry.settings['neo4j_password'])  # connect to the neo4j graph db
+
+        # check if this SFC node exists
+        sfc_node = graph.nodes.match("ServiceFunctionChain", name=sfc_id).first()
+        if sfc_node is None:
+            raise HTTPNotFound("No service function chain node found with identifier {0}".format(sfc_id))
+
+        # delete all nodes that are linked to the SFC node
+        count = 0
+        for node in depth_first_search(graph, sfc_node):
+            graph.delete(node)
+            count += 1
+
+        # delete any reference nodes for temporal graphs that are associated to this SFC identifier
+        for node in graph.nodes.match("Reference", sfc=sfc_id):
+            graph.delete(node)
+            count += 1
+
+        return {"deleted": count}
 
     @view_config(route_name='graph_algorithms_rtt', request_method='GET')
     def run_rtt_query(self):
@@ -111,6 +147,7 @@ class GraphAPI(object):
         An API endpoint to run the round trip time cypher query over the graph associated with a given request ID.
 
         :return: A JSON response with a list of forward latencies, reverse latencies and SF endpoint response time.
+
         :raises HTTPBadRequest: if the request URL doesn't contain the required URL query parameters
         :raises HTTPNotFound: if the request is not associated with any subgraph or the compute node / endpoint node doesn't exist
         """
@@ -122,7 +159,7 @@ class GraphAPI(object):
         except AssertionError as e:
             raise HTTPBadRequest("Request URL format is incorrect: {0}".format(e.args))
 
-        compute_node_label = params["compute_node"]
+        startpoint_node_label = params["startpoint"]
         endpoint_node_label = params["endpoint"]
 
         graph = Graph(host=self.request.registry.settings['neo4j_host'], password=self.request.registry.settings['neo4j_password'])  # connect to the neo4j graph db
@@ -133,9 +170,10 @@ class GraphAPI(object):
         if reference_node is None:
             raise HTTPNotFound("No subgraph found associated with the request ID {0}".format(graph_id))
 
-        compute_node = all_nodes.match("ComputeNode", name=compute_node_label).first()
-        if compute_node is None:
-            raise HTTPNotFound("Compute node {0} doesn't exist.".format(compute_node_label))
+        # match a switch, cluster or ue node as a path start point and capture the type of the node
+        startpoint_node, startpoint_node_type = find_node_with_possible_types(startpoint_node_label, ("Switch", "Cluster", "UserEquipment"), graph)
+        if startpoint_node is None:
+            raise HTTPNotFound("Starting point node {0} doesn't exist.".format(startpoint_node_label))
 
         endpoint_node = all_nodes.match("Endpoint", name=endpoint_node_label, uuid=graph_id).first()
         if endpoint_node is None:
@@ -143,14 +181,18 @@ class GraphAPI(object):
 
         # check if the endpoint is hosted by the compute node before running the RTT cypher query
         hosted_by_node = graph.relationships.match(nodes=(endpoint_node, None), r_type="hostedBy").first().end_node
-        if hosted_by_node["name"] == compute_node["name"]:
+        if hosted_by_node["name"] == startpoint_node["name"]:
             result = {"forward_latencies": [], "reverse_latencies": [], "response_time": endpoint_node["response_time"],
                       "request_size": endpoint_node["request_size"], "response_size": endpoint_node["response_size"]}
         else:
-            query_to_execute = RTT_CYPHER_QUERY_TEMPLATE.format(compute_node_label, endpoint_node_label, graph_id)
+            query_to_execute = RTT_CYPHER_QUERY_TEMPLATE.format(startpoint_node_type, startpoint_node_label, endpoint_node_label, graph_id)
             log.info("Executing cypher query: {0}".format(query_to_execute))
             data = graph.run(query_to_execute).data()  # returns a list of dictionaries, each dictionary represents a row in the result
-            result = data[0]
+            try:
+                result = data[0]
+            except Exception as e:
+                log.error("Unexpected error occurred while executing RTT cypher query for graph with UUID {0} - {1}".format(graph_id, e))
+                raise HTTPBadRequest("The Neo4j cypher query didn't return a valid result for the temporal graph with ID {0}".format(graph_id))
 
         sf_node = graph.match(nodes=(None, endpoint_node), r_type="realisedBy").first().start_node
         if sf_node is None:
@@ -166,6 +208,7 @@ class GraphAPI(object):
 
         result["global_tags"] = {"flame_sfe": endpoint_node["name"], "flame_server": hosted_by_node["name"], "flame_location": hosted_by_node["name"],
                                  "flame_sfc": reference_node["sfc"], "flame_sfci": reference_node["sfci"], "flame_sfp": sf_package_node["name"], "flame_sf": sf_node["name"]}
+        result["local_tags"] = {"traffic_source": startpoint_node_label}
 
         # calculate the Round-Trip-Time
         total_forward_latency = sum(result["forward_latencies"])
@@ -206,3 +249,206 @@ class GraphAPI(object):
             forward_data_delay, reverse_data_delay = 0, 0
 
         return forward_latency + forward_data_delay + service_delay + reverse_latency + reverse_data_delay
+
+    @view_config(route_name='graph_network_topology', request_method='POST')
+    def build_network_topology(self):
+        """
+        An API endpoint to build/update the network topology in the neo4j graph.
+
+        :return: A JSON response with the number of switches, clusters and UEs that were created.
+        """
+
+        graph = Graph(host=self.request.registry.settings['neo4j_host'], password=self.request.registry.settings['neo4j_password'])  # connect to the neo4j graph db
+
+        sdn_controller_ip = self.request.registry.settings['sdn_controller_ip']
+        sdn_controller_port = self.request.registry.settings['sdn_controller_port']
+
+        # retrieve all switches - if SDN controller is unavailable on the given IP address return 503 Service Unavailable
+        try:
+            url = "http://{0}:{1}{2}".format(sdn_controller_ip, sdn_controller_port, "/wm/core/controller/switches/json")
+            response = get(url)
+        except exceptions.ConnectionError:
+            msg = "The SDN controller is not available on IP {0} and port {1}.".format(sdn_controller_ip, sdn_controller_port)
+            log.error("Unexpected error: {0}".format(msg))
+            raise HTTPServiceUnavailable("The SDN controller couldn't be reached when trying to build the network topology.")
+
+        # check if the SDN controller returned the expected response
+        if response.status_code != 200:
+            msg = "The SDN controller returned a response with status code different than 200."
+            log.error("Unexpected error: {0}".format(msg))
+            raise HTTPNotImplemented("The SDN controller failed to return a successful response when querying for the list of switches.")
+
+        try:
+            content = response.json()
+        except ValueError:  # response not in JSON
+            msg = "The SDN controller returned a response which couldn't be converted to JSON."
+            log.error("Unexpected error: {0}".format(msg))
+            raise HTTPNotImplemented("The SDN controller failed to return a valid JSON response when querying for the list of switches.")
+
+        # map the DPID of each switch to its IP address
+        switches = {}
+        for switch in content:
+            # map the dpid to the switch IP address, the IP address is in the format '/172.168.23.54:1234'
+            switches[switch["switchDPID"]] = switch["inetAddress"][1:].split(":")[0]
+
+        # retrieve all external links (gathered through BDDP) - if SDN controller is unavailable on the given IP address return 503 Service Unavailable
+        try:
+            url = "http://{0}:{1}{2}".format(sdn_controller_ip, sdn_controller_port, "/wm/topology/external-links/json")
+            response = get(url)
+        except exceptions.ConnectionError:
+            msg = "The SDN controller is not available on IP {0} and port {1}.".format(sdn_controller_ip, sdn_controller_port)
+            log.error("Unexpected error: {0}".format(msg))
+            raise HTTPServiceUnavailable("The SDN controller couldn't be reached when trying to build the network topology.")
+
+        # check if the SDN controller returned the expected response
+        if response.status_code != 200:
+            msg = "The SDN controller returned a response with status code different than 200."
+            log.error("Unexpected error: {0}".format(msg))
+            raise HTTPNotImplemented("The SDN controller failed to return a successful response when querying for the network topology.")
+
+        try:
+            external_links = response.json()
+        except ValueError:  # response not in JSON
+            msg = "The SDN controller returned a response which couldn't be converted to JSON."
+            log.error("Unexpected error: {0}".format(msg))
+            raise HTTPNotImplemented("The SDN controller failed to return a valid JSON response when querying for the network topology.")
+
+        # retrieve all local links (gathered through LLDP) - if SDN controller is unavailable on the given IP address return 503 Service Unavailable
+        try:
+            url = "http://{0}:{1}{2}".format(sdn_controller_ip, sdn_controller_port, "/wm/topology/links/json")
+            response = get(url)
+        except exceptions.ConnectionError:
+            msg = "The SDN controller is not available on IP {0} and port {1}.".format(sdn_controller_ip, sdn_controller_port)
+            log.error("Unexpected error: {0}".format(msg))
+            raise HTTPServiceUnavailable("The SDN controller couldn't be reached when trying to build the network topology.")
+
+        if response.status_code != 200:
+            msg = "The SDN controller returned a response with status code different than 200."
+            log.error("Unexpected error: {0}".format(msg))
+            raise HTTPNotImplemented("The SDN controller failed to return a successful response when querying for the network topology.")
+
+        try:
+            local_links = response.json()
+        except ValueError:  # response not in JSON
+            msg = "The SDN controller returned a response which couldn't be converted to JSON."
+            log.error("Unexpected error: {0}".format(msg))
+            raise HTTPNotImplemented("The SDN controller failed to return a valid JSON response when querying for the network topology.")
+
+        # TODO this is a temporary solution - currently the service-router-to-cluster mapping is read from a file (which must be manually prepared beforehand)
+        clusters_file = self.request.registry.settings["network_clusters_path"]
+        try:
+            with open(clusters_file) as fh:
+                clusters = load(fh)
+        except Exception as e:
+            log.error("Unexpected error: {0}".format(e))
+            log.error("No service-router-to-cluster mapping was found while building the network topology.")
+            clusters = {}
+
+        # TODO this is a temporary solution - currently the service-router-to-UE mapping is read from a file (which must be manually prepared beforehand)
+        ues_file = self.request.registry.settings["network_ues_path"]
+        try:
+            with open(ues_file) as fh:
+                ues = load(fh)
+        except Exception as e:
+            log.error("Unexpected error: {0}".format(e))
+            log.error("No service-router-to-ue mapping was found while building the network topology.")
+            ues = {}
+
+        # build the network graph (once for the external links and once for the local links) and count the new switch, cluster and UE nodes
+        tmp_switch_count, tmp_clusters_count, tmp_ues_count = build_network_graph(graph, switches, external_links, clusters, ues)
+        switch_count, clusters_count, ues_count = build_network_graph(graph, switches, local_links, clusters, ues)
+        switch_count += tmp_switch_count
+        clusters_count += tmp_clusters_count
+        ues_count += tmp_ues_count
+
+        return {"new_switches_count": switch_count, "new_clusters_count": clusters_count, "new_ues_count": ues_count}
+
+    @view_config(route_name='graph_network_topology', request_method='DELETE')
+    def delete_network_topology(self):
+        """
+        An API endpoint to delete the network topology in the neo4j graph.
+
+        :return: A JSON response with the number of switches, clusters and UEs that were deleted.
+        """
+
+        graph = Graph(host=self.request.registry.settings['neo4j_host'], password=self.request.registry.settings['neo4j_password'])  # connect to the neo4j graph db
+
+        deleted_switches, deleted_clusters, deleted_ues = delete_network_graph(graph)
+
+        return {"deleted_switches_count": deleted_switches, "deleted_clusters_count": deleted_clusters, "deleted_ues_count": deleted_ues}
+
+    @view_config(route_name='graph_execute_pipeline', request_method='POST')
+    def execute_graph_pipeline(self):
+        """
+        An API endpoint to execute the graph pipeline script as a background process.
+
+        :return: A JSON response with the database name and the request's UUID.
+        """
+
+        try:
+            body = self.request.body.decode(self.request.charset)
+            json_queries = validate_monitor_request_body(body)  # validate the content and receive a json dictionary object
+        except AssertionError as e:
+            raise HTTPBadRequest("Bad request content: {0}".format(e.args))
+
+        influx_client = InfluxDBClient(host=self.request.registry.settings['influx_host'], port=self.request.registry.settings['influx_port'], timeout=10)
+
+        database_name = json_queries["service_function_chain"]
+        if database_name not in [db["name"] for db in influx_client.get_list_database()]:
+            raise HTTPBadRequest("Database for service function chain {0} not found.".format(database_name))
+
+        request_uuid = str(uuid4())
+        sfc = json_queries["service_function_chain"]
+
+        # get the list of UEs
+        ues_file = self.request.registry.settings["network_ues_path"]
+        try:
+            with open(ues_file) as fh:
+                ues = load(fh)
+        except Exception as e:
+            log.error("Unexpected error: {0}".format(e))
+            log.error("No service-router-to-ue mapping was found while building the network topology.")
+            ues = {}
+
+        ues_list = list(ues.values())
+        json_queries["ues"] = ues_list
+
+        process = Popen(["graph-pipeline.sh", dumps(json_queries)])
+        process_pid = process.pid
+        process_return_code = process.poll()  # poll() is required here - Popen.returncode stays None until the child process is polled or waited on
+
+        if process_return_code is None:  # the process is still running
+            log.info("Started a graph pipeline process for SFC {0} with PID {1}".format(sfc, process_pid))
+
+            MonitoringProcess.add({"request_id": request_uuid, "process_id": process_pid})
+
+            return {"database": database_name, "uuid": request_uuid}
+        else:  # a return code is present, hence the process has terminated one way or another - we do not expect this since the pipeline script must be continuously running
+            log.warning("Graph pipeline process for SFC {0} with PID {1} has finished executing unexpectedly with return code {2}".format(sfc, process_pid, process_return_code))
+            raise HTTPInternalServerError("An unexpected error occurred while trying to start monitoring graph measurements for service function chain {0}".format(sfc))
+
+    @view_config(route_name='graph_manage_pipeline', request_method='DELETE')
+    def stop_graph_pipeline(self):
+        """
+        An API endpoint to stop a graph pipeline script running in the background.
+
+        :return: A JSON response with a simple info message for the process
+        """
+
+        request_id = self.request.matchdict['request_id']  # get the UUID of the request from the URL
+        process_id = MonitoringProcess.get(request_id)
+
+        if process_id is None:
+            raise HTTPNotFound("A monitoring process with ID {0} couldn't be found.".format(request_id))
+
+        try:
+            kill(process_id, SIGKILL)
+            log.info("Successfully stopped process with request ID {0} and process ID {1}".format(request_id, process_id))
+            response = {"msg": "Monitoring process has been successfully stopped."}
+        except OSError as e:
+            log.warning("Couldn't stop monitoring process with request ID {0} and process ID {1} due to error {2}".format(request_id, process_id, e))
+            response = {"msg": "Monitoring process has been stopped before this request was executed."}
+
+        MonitoringProcess.delete(request_id)
+
+        return response
diff --git a/src/service/clmcservice/models/__init__.py b/src/service/clmcservice/models/__init__.py
index bdf3774dab46f54179ef4e30aa05c18f2956f9e9..345922d34bbb09512b630750722082e86a645eaf 100644
--- a/src/service/clmcservice/models/__init__.py
+++ b/src/service/clmcservice/models/__init__.py
@@ -1,2 +1,5 @@
 from .meta import DBSession
+
+from .graphapi_models import MonitoringProcess
+# NOTE: all ORM models defined in this package must be imported here (in the __init__.py file) - Pyramid and SQLAlchemy specific approach
 from .whoami_models import ServiceFunctionEndpoint
diff --git a/src/service/clmcservice/models/graphapi_models.py b/src/service/clmcservice/models/graphapi_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..3526cdf3309a2cf167447077233a0edcf47f0232
--- /dev/null
+++ b/src/service/clmcservice/models/graphapi_models.py
@@ -0,0 +1,80 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          25-02-2019
+//      Created for Project :   FLAME
+"""
+
+
+class _MonitoringProcess:
+    """
+    Container-like class used to store key-value pairs mapping graph pipeline request identifiers to process identifiers.
+    The class is declared as "pseudo-private" and ought to be initialised only in this module, where it is exposed as a singleton.
+    This class mimics the interface of the other model classes.
+    """
+
+    def __init__(self):
+
+        self.__pipelines = {}
+
+    def add(self, instance):
+        """
+        Adds a new key-value pair to the container representing a request ID mapped to a PID.
+
+        :param instance: JSON representation of the key-value pair - e.g. {"request_id": <request uuid>, "process_id": <process ID>}
+        """
+
+        self.__pipelines[instance["request_id"]] = instance["process_id"]
+
+    def delete(self, request_id):
+        """
+        Deletes a key-value pair from the container representing a request ID mapped to a PID.
+
+        :param request_id: the request UUID
+        """
+
+        self.__pipelines.pop(request_id)
+
+    def get(self, request_id):
+        """
+        Gets the data associated with a request identifier - currently the data is just the process identifier.
+
+        :param request_id: the request identifier to check
+
+        :return: the PID for this request ID or None if it doesn't exist
+        """
+
+        return self.__pipelines.get(request_id)
+
+    def exists(self, request_id):
+        """
+        Checks whether a request ID is associated with a process ID.
+
+        :param request_id: the request identifier to check
+
+        :return: True or False
+        """
+
+        return request_id in self.__pipelines
+
+
+# Only initialise the class here; this instance is imported in models/__init__.py so that it behaves like the other ORM models,
+# whose interface the container mimics
+MonitoringProcess = _MonitoringProcess()
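+
+# A minimal usage sketch (the UUID and PID below are hypothetical):
+#   MonitoringProcess.add({"request_id": "75df6f8d-3829-4fd8-a3e6-b3e917010141", "process_id": 1234})
+#   MonitoringProcess.get("75df6f8d-3829-4fd8-a3e6-b3e917010141")  # returns 1234
+#   MonitoringProcess.delete("75df6f8d-3829-4fd8-a3e6-b3e917010141")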
diff --git a/src/service/development.ini b/src/service/development.ini
index cbc930b6b81038ed0ef4b074d0ff0e674458e9b9..e132f45f6e063f977e0decb8f93ed3bfbde1c25d 100644
--- a/src/service/development.ini
+++ b/src/service/development.ini
@@ -15,7 +15,11 @@ pyramid.includes = pyramid_debugtoolbar pyramid_exclog
 exclog.ignore =
 
 
-network_configuration_path = /vagrant/src/service/resources/GraphAPI/network_config.json
+network_clusters_path = /opt/clmc/src/service/resources/GraphAPI/network_clusters.json
+network_ues_path = /opt/clmc/src/service/resources/GraphAPI/network_ues.json
+
+# 10000 Mb/s = 10 Gb/s
+network_bandwidth = 10000
 
 # PostgreSQL connection url
 sqlalchemy.url = postgresql://clmc:clmc_service@localhost:5432/whoamidb
diff --git a/src/service/production.ini b/src/service/production.ini
index 4d9613152f2ad78fe7bdab6c811a18b3fc8f3cec..c11a6346ac8553954538356f5e1c261a94bc3a74 100644
--- a/src/service/production.ini
+++ b/src/service/production.ini
@@ -15,7 +15,11 @@ pyramid.includes = pyramid_exclog
 exclog.ignore =
 
 
-network_configuration_path = /opt/clmc/src/service/resources/GraphAPI/network_config.json
+network_clusters_path = /opt/clmc/src/service/resources/GraphAPI/network_clusters.json
+network_ues_path = /opt/clmc/src/service/resources/GraphAPI/network_ues.json
+
+# 10000 Mb/s = 10 Gb/s
+network_bandwidth = 10000
 
 # PostgreSQL connection url
 sqlalchemy.url = postgresql://clmc:clmc_service@localhost:5432/whoamidb
diff --git a/src/service/resources/GraphAPI/network_clusters.json b/src/service/resources/GraphAPI/network_clusters.json
new file mode 100644
index 0000000000000000000000000000000000000000..40fe2629a6ab664643591eb95533bfe7613520b8
--- /dev/null
+++ b/src/service/resources/GraphAPI/network_clusters.json
@@ -0,0 +1,6 @@
+{
+  "172.20.231.11": "20-sr1-cluster1-cluster",
+  "172.20.231.18": "22-sr1-cluster1-cluster",
+  "172.20.231.17": "23-sr1-cluster1-cluster",
+  "172.20.231.2": "24-sr1-cluster1-cluster"
+}
\ No newline at end of file
diff --git a/src/service/resources/GraphAPI/network_ues.json b/src/service/resources/GraphAPI/network_ues.json
new file mode 100644
index 0000000000000000000000000000000000000000..40c1c973a03afcf29a8c3b2f42b9fdac85277831
--- /dev/null
+++ b/src/service/resources/GraphAPI/network_ues.json
@@ -0,0 +1,6 @@
+{
+  "172.20.231.3": "ue20",
+  "172.20.231.22": "ue22",
+  "172.20.231.7": "ue23",
+  "172.20.231.19": "ue24"
+}
\ No newline at end of file
diff --git a/src/test/clmctest/alerts/resources_test_config.yaml b/src/test/clmctest/alerts/resources_test_config.yaml
index 9bd17df6fb473fcea31b132aeae249168e997d58..6f83faa50183b9d0d3504da362c937cd288f27fa 100644
--- a/src/test/clmctest/alerts/resources_test_config.yaml
+++ b/src/test/clmctest/alerts/resources_test_config.yaml
@@ -3,7 +3,7 @@ tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
 metadata:
   template_name: Flame CLMC Alerts Integration Test
   servicefunctionchain: MS_Template_1
-  sfci: MS_I1
+#  sfci: MS_I1
 
 
 # Import own definitions of nodes, capabilities and policy syntax.