diff --git a/docs/clmc-service.md b/docs/clmc-service.md
index e52635aa8d1b72ac616850bbc32f90c38c8ae148..978afeff1e400a2923d20a26d3d9621520dd0ba6 100644
--- a/docs/clmc-service.md
+++ b/docs/clmc-service.md
@@ -352,6 +352,26 @@ with **/clmc-service** so that the nginx reverse proxy server (listening on port
         }
         ```
 
+* **DELETE** ***/graph/{service_function_chain_identifier}***
+
+    This API method sends a request to delete a full media service graph identified by its service function chain identifier - this means deleting the SFC node, all SFC instance nodes
+    linked to the SFC node, all SF package nodes, SF nodes and temporal SF endpoint nodes that are part of the graph for this SFC.
+    
+    * Response:
+
+        The response of this request is JSON content, which contains the number of deleted nodes.
+
+        Returns a 404 Not Found error if the SFC identifier is not associated with any SFC nodes in the graph.
+
+    * Response Body Example:
+
+        ```json
+        {
+           "deleted": 10
+        }
+        ```
+
+
 * **GET** ***/graph/temporal/{graph_id}/round-trip-time?startpoint={startpoint_id}&endpoint={endpoint_id}***
 
     This API method sends a request to run the Cypher Round-Trip-Time query over a temporal graph associated with a request UUID (retrieved from the response of a build-graph request).
diff --git a/src/service/clmcservice/__init__.py b/src/service/clmcservice/__init__.py
index 1f40aaca0173ec291b97faefaa1e626f8789fe77..8ffb6251225ecbac4907be35e431e3c5d5d05853 100644
--- a/src/service/clmcservice/__init__.py
+++ b/src/service/clmcservice/__init__.py
@@ -67,7 +67,8 @@ def main(global_config, **settings):
 
     # add routes of the GRAPH API
     config.add_route('graph_build', '/graph/temporal')
-    config.add_route('graph_manage', '/graph/temporal/{graph_id}')
+    config.add_route('temporal_graph_manage', '/graph/temporal/{graph_id}')
+    config.add_route('full_graph_manage', '/graph/{sfc_id}')
     config.add_route('graph_algorithms_rtt', '/graph/temporal/{graph_id}/round-trip-time')
     config.add_route('graph_network_topology', '/graph/network')
     config.add_route('graph_execute_pipeline', '/graph/monitor')
diff --git a/src/service/clmcservice/graphapi/tests.py b/src/service/clmcservice/graphapi/tests.py
index 0e72a83e71d5a3b97ce840e9a08cfd49eb30e7db..3abc635640fd48a15d34b9bd3a0c2df8e731519b 100644
--- a/src/service/clmcservice/graphapi/tests.py
+++ b/src/service/clmcservice/graphapi/tests.py
@@ -107,7 +107,7 @@ class TestGraphAPI(object):
         Tests the graph build API endpoint - it makes 2 API calls and checks that the expected graph was created (the influx data that's being used is reported to InfluxDB in the conftest file)
 
         :param uuid_mock: mock object to mock the behaviour of the uuid4 function
-        :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data, test database name and the graph db client object (this is a fixture from conftest)
+        :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data and the graph db client object (this is a fixture from conftest)
         """
 
         from_timestamp, to_timestamp, graph_db = db_testing_data
@@ -123,7 +123,7 @@ class TestGraphAPI(object):
 
         # Create a valid build request and send it to the API endpoint
         uuid_mock.return_value = "graph_test_build_uuid1"
-        responses = graph_generator(from_timestamp, to_timestamp)
+        responses = graph_generator(from_timestamp, to_timestamp)  # generates 2 graphs by sending request to the build API and yielding back the responses for each request
         response = next(responses)
 
         graph_subresponse = response.pop("graph")
@@ -231,18 +231,24 @@ class TestGraphAPI(object):
             assert endpoint_node["request_size"] == pytest.approx(request_size, 1), "Wrong request size attribute of endpoint node"
             assert endpoint_node["response_size"] == pytest.approx(response_size, 1), "Wrong response size attribute of endpoint node"
 
+        request = testing.DummyRequest()
+        request.matchdict["sfc_id"] = "test_sfc"
+        response = GraphAPI(request).delete_full_graph()
+        assert response == {"deleted": 17}
+
     @patch('clmcservice.graphapi.views.uuid4')
-    def test_delete(self, uuid_mock, db_testing_data):
+    def test_delete_temporal(self, uuid_mock, db_testing_data):
         """
         Tests the delete API endpoint of the Graph API - the test depends on the build test to have been passed successfully so that graph_1_id and graph_2_id have been set
 
-        :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data, test database name and the graph db client object (this is a fixture from conftest)
+        :param uuid_mock: the mock object used to mimic the behaviour of the uuid.uuid4 function
+        :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data and the graph db client object (this is a fixture from conftest)
         """
 
         from_timestamp, to_timestamp, graph_db = db_testing_data
 
         # build test graphs
-        responses = graph_generator(from_timestamp, to_timestamp)
+        responses = graph_generator(from_timestamp, to_timestamp)  # generates 2 graphs by sending request to the build API and yielding back the responses for each request
 
         uuid_mock.return_value = "graph_test_delete_uuid1"
         graph_1_test_id = uuid_mock.return_value
@@ -282,11 +288,46 @@ class TestGraphAPI(object):
         assert set([node["name"] for node in graph_db.nodes.match("Cluster")]) == set(["DC" + str(i) for i in range(1, 7)]), "Cluster nodes must not be deleted"
         assert set([node["name"] for node in graph_db.nodes.match("Switch")]) == set(["127.0.0." + str(i) for i in range(1, 7)]), "Switch nodes must not be deleted"
         assert set([node["name"] for node in graph_db.nodes.match("UserEquipment")]) == set(["ue" + str(i) for i in (2, 3, 6)]), "UE nodes must not be deleted"
+        assert set([node["name"] for node in graph_db.nodes.match("ServiceFunctionEndpoint")]) == set(), "Endpoint nodes must have been deleted"
         assert set([node["name"] for node in graph_db.nodes.match("ServiceFunction")]) == {"nginx_1", "apache_1", "minio_1", "minio_2"}, "Service functions must not be deleted."
         assert set([node["name"] for node in graph_db.nodes.match("ServiceFunctionPackage")]) == {"nginx", "minio", "apache"}, "Service function packages must not be deleted"
         assert set([node["name"] for node in graph_db.nodes.match("ServiceFunctionChainInstance")]) == {"test_sfc_premium", "test_sfc_non_premium"}, "Service function chain instances must not be deleted"
         assert set([node["name"] for node in graph_db.nodes.match("ServiceFunctionChain")]) == {"test_sfc"}, "Service function chains must not be deleted"
 
+        request = testing.DummyRequest()
+        request.matchdict["sfc_id"] = "test_sfc"
+        response = GraphAPI(request).delete_full_graph()
+        assert response == {"deleted": 10}
+
+    def test_delete_full(self, db_testing_data):
+        """
+        Tests the functionality to delete the full media service graph starting from a service function chain node down to an endpoint node.
+
+        :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data and the graph db client object (this is a fixture from conftest)
+
+        :return:
+        """
+
+        from_timestamp, to_timestamp, graph_db = db_testing_data
+
+        responses = graph_generator(from_timestamp, to_timestamp)  # generates 2 graphs by sending request to the build API and yielding back the responses for each request
+        next(responses)
+        next(responses)
+
+        request = testing.DummyRequest()
+        request.matchdict["sfc_id"] = "invalid_test_sfc"
+        error_raised = False
+        try:
+            GraphAPI(request).delete_full_graph()
+        except HTTPNotFound:
+            error_raised = True
+        assert error_raised, "Error must have been raised for invalid SFC identifier"
+
+        request = testing.DummyRequest()
+        request.matchdict["sfc_id"] = "test_sfc"
+        response = GraphAPI(request).delete_full_graph()
+        assert response == {"deleted": 17}
+
     @pytest.mark.parametrize("graph_id, endpoint, startpoint, error_type, error_msg", [
         ('e8cd4768-47dd-48cd-9c74-7f8926ddbad8', None, None, HTTPBadRequest, "HTTP Bad Request must be thrown in case of missing or invalid url parameters"),
         ('e8cd4768-47dd-48cd-9c74-7f8926ddbad8', None, "nginx", HTTPBadRequest, "HTTP Bad Request must be thrown in case of missing or invalid url parameters"),
@@ -322,13 +363,13 @@ class TestGraphAPI(object):
         """
         Tests the rtt API endpoint of the Graph API.
 
-        :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data, test database name and the graph db client object (this is a fixture from conftest)
+        :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data and the graph db client object (this is a fixture from conftest)
         """
 
         from_timestamp, to_timestamp, graph_db = db_testing_data
 
         # create a graph to use for RTT test by using the build API endpoint
-        responses = graph_generator(from_timestamp, to_timestamp)
+        responses = graph_generator(from_timestamp, to_timestamp)  # generates 2 graphs by sending request to the build API and yielding back the responses for each request
         response = next(responses)
         request_id = response["graph"]["uuid"]
 
diff --git a/src/service/clmcservice/graphapi/utilities.py b/src/service/clmcservice/graphapi/utilities.py
index a9746984585401d0afd0c8770cbbab5e75befe03..a87ffbf7a22bc03879aa61d98f9688e43c8c99e3 100644
--- a/src/service/clmcservice/graphapi/utilities.py
+++ b/src/service/clmcservice/graphapi/utilities.py
@@ -24,7 +24,7 @@
 
 from json import loads
 from py2neo import Node, Relationship
-import logging
+from logging import getLogger
 
 
 GRAPH_ROUND_TRIP_TIME_URL_PARAMS = ("startpoint", "endpoint")
@@ -60,7 +60,7 @@ RETURN latencies  as forward_latencies, reverse(latencies) as reverse_latencies,
 # """
 
 
-log = logging.getLogger('service_logger')
+log = getLogger('service_logger')
 
 
 def validate_build_request_body(body):
@@ -461,3 +461,46 @@ def find_node_with_possible_types(name, possible_types, graph):
             return node, type_
 
     return None, None
+
+
+def depth_first_search(graph, root_node):
+    """
+    A generator, which performs a depth-first search through the graph starting from a root node and stopping when it reaches an Endpoint node.
+
+    :param graph: the graph db client
+    :param root_node: the root node reference (e.g. a ServiceFunctionChain node)
+
+    :return: a sequence of nodes traversed in depth-first manner
+    """
+
+    # a separate stack used to store the nodes in post-order
+    post_order = []
+
+    # a map between the type of a node and the type of edges that are going to be searched - e.g. for nodes of type ServiceFunctionChain, look for utilizedBy and instanceOf edges
+    node_edges_labels = {"ServiceFunctionChain": ("utilizedBy", "instanceOf"), "ServiceFunctionChainInstance": ("utilizedBy",), "ServiceFunction": ("realisedBy",)}
+
+    # the stack used for the iterative depth-first traversal - holds node references only
+    stack = [root_node]
+
+    log.info("Performing DFS starting from node {0}".format(root_node["name"]))
+    # simple declarative Depth-First search using a stack
+    while len(stack) > 0:
+        current_node = stack.pop()
+        post_order.append(current_node)
+
+        current_node_type = list(current_node.labels)[0]  # a node might have multiple labels in Neo4j, but in our scenario we expect exactly one label
+        for edge_label in node_edges_labels.get(current_node_type, []):  # for each possible edge label of the given node type, or empty list by default value
+            for relationship in graph.match({current_node, }, r_type=edge_label):  # we use a set because we do not care for direction
+                # the direction of the relationship is ambiguous so check which end of it we need to append
+                if relationship.end_node == current_node:
+                    stack.append(relationship.start_node)
+                else:
+                    stack.append(relationship.end_node)
+
+    log.info("Performing post order iteration after DFS is finished.")
+    # yield the elements in post order
+    while len(post_order) > 0:
+        node = post_order.pop()
+        print(node["name"])
+        yield node
+        log.debug("Yielding node {0}".format(node["name"]))
diff --git a/src/service/clmcservice/graphapi/views.py b/src/service/clmcservice/graphapi/views.py
index 4c442a3e5dd128b3ab7cf66b4618bf95d0fff3b3..78fea3fd86e817980d0fad947efbc187d9a13bf9 100644
--- a/src/service/clmcservice/graphapi/views.py
+++ b/src/service/clmcservice/graphapi/views.py
@@ -24,7 +24,7 @@
 
 
 from clmcservice.graphapi.utilities import validate_build_request_body, validate_monitor_request_body, RTT_CYPHER_QUERY_TEMPLATE, \
-    build_network_graph, delete_network_graph, build_temporal_subgraph, delete_temporal_subgraph, validate_graph_rtt_params, find_node_with_possible_types
+    build_network_graph, delete_network_graph, build_temporal_subgraph, delete_temporal_subgraph, validate_graph_rtt_params, find_node_with_possible_types, depth_first_search
 from clmcservice.models import MonitoringProcess
 from influxdb import InfluxDBClient
 from py2neo import Graph
@@ -91,7 +91,7 @@ class GraphAPI(object):
 
         return json_response
 
-    @view_config(route_name='graph_manage', request_method='DELETE')
+    @view_config(route_name='temporal_graph_manage', request_method='DELETE')
     def delete_temporal_graph(self):
         """
         An API endpoint to delete a temporal graph associated with a uuid generated by the CLMC service.
@@ -110,6 +110,37 @@ class GraphAPI(object):
         number_of_deleted_nodes = delete_temporal_subgraph(graph, graph_id)
         return {"deleted": number_of_deleted_nodes}
 
+    @view_config(route_name='full_graph_manage', request_method='DELETE')
+    def delete_full_graph(self):
+        """
+        An API endpoint to delete a media service graph with a given SFC identifier.
+
+        :return: A JSON document containing the number of deleted nodes.
+
+        :raises HTTPNotFound: if there is no SFC node with the given identifier
+        """
+
+        sfc_id = self.request.matchdict['sfc_id']  # get the SFC identifier from the URL
+        graph = Graph(host=self.request.registry.settings['neo4j_host'], password=self.request.registry.settings['neo4j_password'])  # connect to the neo4j graph db
+
+        # check if this SFC node exists
+        sfc_node = graph.nodes.match("ServiceFunctionChain", name=sfc_id).first()
+        if sfc_node is None:
+            raise HTTPNotFound("No service function chain node found with identifier {0}".format(sfc_id))
+
+        # delete all nodes that are linked to the SFC node
+        count = 0
+        for node in depth_first_search(graph, sfc_node):
+            graph.delete(node)
+            count += 1
+
+        # delete any reference nodes for temporal graphs that are associated to this SFC identifier
+        for node in graph.nodes.match("Reference", sfc=sfc_id):
+            graph.delete(node)
+            count += 1
+
+        return {"deleted": count}
+
     @view_config(route_name='graph_algorithms_rtt', request_method='GET')
     def run_rtt_query(self):
         """