diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index c50739fb8257d0ef80b9e69d41cda1228ccae5e8..f974de808716834067eb52249ca615229c46db1f 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -35,8 +35,8 @@ build:tests:
     - python setup.py sdist --dist-dir=$CI_PROJECT_DIR/build
   artifacts:
     paths:
-    - build/clmctest-2.2.0.tar.gz
-    - build/clmcservice-2.2.0.tar.gz
+    - build/clmctest-2.3.0.tar.gz
+    - build/clmcservice-2.3.0.tar.gz
     expire_in: 1 day
 
 test:all:
@@ -50,8 +50,8 @@ test:all:
     - echo "REPO_PASS=${REPO_PASS}" >> $CI_PROJECT_DIR/reporc
     - sudo scripts/test/fixture.sh create -f src/test/clmctest/rspec.json -r $CI_PROJECT_DIR -c all
     - sudo mkdir /var/lib/lxd/containers/test-runner/rootfs/opt/clmc/build
-    - sudo cp build/clmctest-2.2.0.tar.gz /var/lib/lxd/containers/test-runner/rootfs/opt/clmc/build
-    - sudo lxc exec test-runner -- pip3 install /opt/clmc/build/clmctest-2.2.0.tar.gz
+    - sudo cp build/clmctest-2.3.0.tar.gz /var/lib/lxd/containers/test-runner/rootfs/opt/clmc/build
+    - sudo lxc exec test-runner -- pip3 install /opt/clmc/build/clmctest-2.3.0.tar.gz
     - sudo lxc exec test-runner -- pytest -s --tb=short -rfp --pyargs clmctest
   when: on_success      
   
diff --git a/docs/clmc-service.md b/docs/clmc-service.md
index 129b2359a1df1cab9f9660fa3dcd96f2fd2eee92..ef4b1ee2f63c4f615df857b10e78368f257a2d5b 100644
--- a/docs/clmc-service.md
+++ b/docs/clmc-service.md
@@ -153,6 +153,94 @@ with **/clmc-service** so that the nginx reverse proxy server (listening on port
     The request/response format of this method is the same as the **POST /alerts** API endpoint with the only difference being that existing alert tasks
     or handlers will be re-created rather than returning a duplication error.
 
+* **DELETE** ***/alerts***
+
+    This API method can be used to send an alert specification document, which is then used by the CLMC service to delete
+    alert tasks and deregister alert handlers in Kapacitor. Essentially, it is a clean-up endpoint for the alerts API. 
+    For further information on the alert specification document, please check the [CLMC Alert Specification Documentation](AlertsSpecification.md).
+
+    * Request:
+    
+        Expects a YAML-formatted file in the request referenced with ID ***alert-spec*** representing the TOSCA alert specification 
+        document. The alert specification document is then parsed with the openstack TOSCA parser (https://github.com/openstack/tosca-parser/tree/master/toscaparser)
+        and validated against the CLMC alerts specification schema (again check [documentation](AlertsSpecification.md) for more info on this).
+
+    * Example for sending a request with curl:
+    
+        `curl -X DELETE -F "alert-spec=@alert-specification.yaml" http://localhost:9080/alerts`
+        
+        where **alert-specification.yaml** is the path to the alerts specification file.
+    
+    * Response:
+    
+        The response of this request is a JSON-formatted content, which contains two lists - one for the alerts that were found in the TOSCA specification
+        and then deleted from Kapacitor, and one for the respective alert handlers that were deleted from Kapacitor.
+                        
+        Returns a 400 Bad Request if the request does not contain a yaml file referenced with ID **alert-spec**.
+        
+        Returns a 400 Bad Request if the alert specification file is not a valid YAML file.
+        
+        Returns a 400 Bad Request if the alert specification file cannot be parsed with the TOSCA parser.
+        
+        Returns a 400 Bad Request if the alert specification file fails validation against the CLMC alerts specification schema.
+    
+    * Response Body Example:
+    
+        ```json
+        {
+          "deleted_alerts": [{"policy": "scale_nginx_policy", "trigger": "high_requests"}, 
+                             {"policy": "scale_nginx_policy", "trigger": "increase_in_active_requests"},
+                             {"policy": "scale_nginx_policy", "trigger": "increase_in_running_processes"}, 
+                             {"policy": "deadman_policy", "trigger": "no_measurements"}], 
+          "deleted_handlers": [{"policy": "scale_nginx_policy", "trigger": "increase_in_active_requests", "handler": "flame_sfemc"},
+                               {"policy": "scale_nginx_policy", "trigger": "increase_in_running_processes", "handler": "flame_sfemc"},
+                               {"policy": "deadman_policy", "trigger": "no_measurements", "handler": "flame_sfemc"},
+                               {"policy": "scale_nginx_policy", "trigger": "high_requests", "handler": "http://172.40.231.200:9999/"},
+                               {"policy": "scale_nginx_policy", "trigger": "increase_in_active_requests", "handler": "http://172.40.231.200:9999/"},
+                               {"policy": "scale_nginx_policy", "trigger": "increase_in_running_processes", "handler": "http://172.40.231.200:9999/"},
+                               {"policy": "deadman_policy", "trigger": "no_measurements", "handler": "http://172.40.231.200:9999/"}]
+        }
+        ```
+
+* **GET** ***/alerts/{sfc_id}***
+
+    This API method can be used to fetch all alerts that are registered for instances of the service function chain identified
+    by the ***sfc_id*** url parameter.
+
+    * Request:
+    
+        Expects a URL with the following parameter - **sfc_id**. The given parameter should uniquely
+        identify a service function chain for which the registered alerts information will be returned.
+        
+    * Request URL Examples:
+    
+        **/alerts/MSDemo**
+
+    * Response:
+    
+        The response of this request is a JSON-formatted content, which contains a mapping between SFC instance identifiers and 
+        lists of alert objects each of which represents a registered alert and contains the alert policy ID, trigger ID and Kapacitor resources including handlers.
+        Therefore, the response maps an SFC instance identifier to the list of alerts that were registered for this SFC instance.
+
+    * Response Example:
+
+        ```json
+        {
+         "MSDemo_1": [
+                {
+                    "policy": "<TOSCA policy ID>",
+                    "trigger": "<TOSCA trigger ID>",
+                    "handlers": ["<list of URLs that receive alerts through POST requests>"],
+                    "task_identifier": "<Kapacitor task identifier>",
+                    "topic_identifier": "<Kapacitor topic identifier>",
+                    "task_api_endpoint": "/kapacitor/v1/tasks/<Kapacitor task identifier>",
+                    "topic_api_endpoint": "/kapacitor/v1/alerts/topics/<Kapacitor topic identifier>",
+                    "topic_handlers_api_endpoint": "/kapacitor/v1/alerts/topics/<Kapacitor topic identifier>/handlers"
+                }
+            ]
+        }      
+        ```
+
 * **GET** ***/alerts/{sfc_id}/{sfc_instance_id}***
 
     This API method can be used to fetch all alerts that are registered for a specific service function chain instance identified
@@ -160,7 +248,7 @@ with **/clmc-service** so that the nginx reverse proxy server (listening on port
     
     * Request:
     
-        Expects a URL query with the following parameters - **sfc_id**, **sfc_instance_id**. The given parameters should uniquely
+        Expects a URL with the following parameters - **sfc_id**, **sfc_instance_id**. The given parameters should uniquely
         identify a service function chain instance for which the registered alerts information will be returned.
         
     * Request URL Examples:
@@ -191,52 +279,59 @@ with **/clmc-service** so that the nginx reverse proxy server (listening on port
         ]
         ```
 
-* **DELETE** ***/alerts***
+* **DELETE** ***/alerts/{sfc_id}***
 
-    This API method can be used to send an alert specification document, which is then used by the CLMC service to delete
-    alert tasks and deregister alert handlers in Kapacitor. Essentially, it is a clean-up endpoint for the alerts API. 
-    For further information on the alert specification document, please check the [CLMC Alert Specification Documentation](AlertsSpecification.md).
+    This API method can be used to delete all alerts that are registered for instances of a specific service function chain identified
+    by the ***sfc_id*** URL parameter.
 
     * Request:
     
-        Expects a YAML-formatted file in the request referenced with ID ***alert-spec*** representing the TOSCA alert specification 
-        document. The alert specification document is then parsed with the openstack TOSCA parser (https://github.com/openstack/tosca-parser/tree/master/toscaparser)
-        and validated against the CLMC alerts specification schema (again check [documentation](AlertsSpecification.md) for more info on this).
-
-    * Example for sending a request with curl:
-    
-        `curl -X DELETE -F "alert-spec=@alert-specification.yaml" http://localhost:9080/alerts`
+        Expects a URL with the following parameter - **sfc_id**. The given parameter should uniquely
+        identify a service function chain for which the registered alerts will be deleted.
         
-        where **alert-specification.yaml** is the path to the alerts specification file.
+    * Request URL Examples:
     
+        **/alerts/MSDemo**
+
     * Response:
     
-        The response of this request is a JSON-formatted content, which contains two lists - one for the alerts that were found in the TOSCA specification
-        and then deleted from Kapacitor, and one for the respective alert handlers that were deleted from Kapacitor.
-                        
-        Returns a 400 Bad Request if the request does not contain a yaml file referenced with ID **alert-spec**.
+        The response of this request is a JSON-formatted content, which contains the number of deleted alerts for each instance of this SFC.
+    
+    * Response Example:
         
-        Returns a 400 Bad Request if the alert specification file is not a valid YAML file.
+        ```json
+        {
+            "MSDemo_1" : {
+                "deleted_alerts_count": 10
+            }
+        }
+        ```
+
+* **DELETE** ***/alerts/{sfc_id}/{sfc_instance_id}***
+
+    This API method can be used to delete all alerts that are registered for a specific service function chain instance identified
+    by the ***sfc_id*** and ***sfc_instance_id*** URL parameters.
+    
+    * Request:
+    
+        Expects a URL with the following parameters - **sfc_id**, **sfc_instance_id**. The given parameters should uniquely
+        identify a service function chain instance for which the registered alerts will be deleted.
         
-        Returns a 400 Bad Request if the alert specification file cannot be parsed with the TOSCA parser.
+    * Request URL Examples:
+    
+        **/alerts/MSDemo/MSDemo_1**
         
-        Returns a 400 Bad Request if the alert specification file fails validation against the CLMC alerts specification schema.
+        **/alerts/SimpleMediaService/SimpleMediaService_1**
     
-    * Response Body Example:
+    * Response:
+    
+        The response of this request is a JSON-formatted content, which contains the number of deleted alerts.
     
+    * Response Example:
+        
         ```json
         {
-          "deleted_alerts": [{"policy": "scale_nginx_policy", "trigger": "high_requests"}, 
-                             {"policy": "scale_nginx_policy", "trigger": "increase_in_active_requests"},
-                             {"policy": "scale_nginx_policy", "trigger": "increase_in_running_processes"}, 
-                             {"policy": "deadman_policy", "trigger": "no_measurements"}], 
-          "deleted_handlers": [{"policy": "scale_nginx_policy", "trigger": "increase_in_active_requests", "handler": "flame_sfemc"},
-                               {"policy": "scale_nginx_policy", "trigger": "increase_in_running_processes", "handler": "flame_sfemc"},
-                               {"policy": "deadman_policy", "trigger": "no_measurements", "handler": "flame_sfemc"},
-                               {"policy": "scale_nginx_policy", "trigger": "high_requests", "handler": "http://172.40.231.200:9999/"},
-                               {"policy": "scale_nginx_policy", "trigger": "increase_in_active_requests", "handler": "http://172.40.231.200:9999/"},
-                               {"policy": "scale_nginx_policy", "trigger": "increase_in_running_processes", "handler": "http://172.40.231.200:9999/"},
-                               {"policy": "deadman_policy", "trigger": "no_measurements", "handler": "http://172.40.231.200:9999/"}]
+            "deleted_alerts_count": 10
         }
         ```
 
@@ -277,6 +372,36 @@ with **/clmc-service** so that the nginx reverse proxy server (listening on port
         }
         ```
 
+## Data Management API Endpoints
+
+* **DELETE** ***/data/{sfc_id}***
+
+    This API method can be used to delete all alerts registered for instances of a specific service function chain, identified
+    by the ***sfc_id*** URL parameter, as well as any time-series data (N.B. the time-series database created for this SFC will be deleted).
+    
+    * Request:
+    
+        Expects a URL with the following parameter - **sfc_id**. The given parameter should uniquely
+        identify a service function chain for which data stored on CLMC (alerts and measurements) will be deleted.
+        
+    * Request URL Examples:
+    
+        **/data/MSDemo**
+        
+        **/data/SimpleMediaService**
+    
+    * Response:
+    
+        Returns a simple message to indicate that the deletion of alerts and database has been successful.
+    
+    * Response Example:
+        
+        ```json
+        {
+            "msg": "Time-series data and alerts have been successfully deleted for SFC MSDemo."
+        }
+        ```
+
 ## Graph API Endpoints
 
 * **Assumptions**
diff --git a/src/service/VERSION b/src/service/VERSION
index 1f8197ce8a3b7dcfa3937a90ffee1db776aaace7..59124ba3ed3e948992d5cf837750ed347b4f46db 100644
--- a/src/service/VERSION
+++ b/src/service/VERSION
@@ -1 +1 @@
-__version__ = "2.2.0"
\ No newline at end of file
+__version__ = "2.3.0"
\ No newline at end of file
diff --git a/src/service/clmcservice/__init__.py b/src/service/clmcservice/__init__.py
index c17e71b9506d71736ab7eeeba4ce71af9d4150eb..d9d6410d905dcabb8b7dd778eb72383408852f85 100644
--- a/src/service/clmcservice/__init__.py
+++ b/src/service/clmcservice/__init__.py
@@ -52,8 +52,9 @@ def main(global_config, **settings):
     settings['sdn_controller_ip'] = os.environ['SDN_CONTROLLER_IP']  # read the SDN controller IP address from the OS environment
     settings['sdn_controller_port'] = int(os.environ.get('SDN_CONTROLLER_PORT', 8080))  # read the SDN controller port number from the OS environment, if not set use 8080 as default
 
-    settings['influx_port'] = int(settings['influx_port'])  # the influx port setting must be converted to integer instead of a string
+    settings['influx_port'] = int(settings['influx_port'])  # the influxdb port setting must be converted to integer instead of a string
     settings['kapacitor_port'] = int(settings['kapacitor_port'])  # the kapacitor port setting must be converted to integer instead of a string
+    settings['clmc_service_port'] = int(settings['clmc_service_port'])  # the clmc service port setting must be converted to integer instead of a string
 
     settings["network_bandwidth"] = int(settings["network_bandwidth"])  # TODO currently assumed fixed bandwidth across all links
 
@@ -77,6 +78,10 @@ def main(global_config, **settings):
     # add routes of the Alerts Configuration API
     config.add_route('alerts_configuration', '/alerts')
     config.add_route('alerts_configuration_instance', '/alerts/{sfc_id}/{sfc_instance_id}')
+    config.add_route('alerts_configuration_aggregate', '/alerts/{sfc_id}')
+
+    # add routes of the Data Management API
+    config.add_route('data_management_aggregate', '/data/{sfc_id}')
 
     config.scan()  # this method scans the packages and finds any views related to the routes added in the app configuration
     return config.make_wsgi_app()
diff --git a/src/service/clmcservice/alertsapi/tests.py b/src/service/clmcservice/alertsapi/tests.py
index a24b5970154dd664885908286e714dd4d7e7092e..ef481158a18177a10afb2aa8357ccfb5c1515542 100644
--- a/src/service/clmcservice/alertsapi/tests.py
+++ b/src/service/clmcservice/alertsapi/tests.py
@@ -205,7 +205,7 @@ class TestAlertsConfigurationAPI(object):
             assert "triggers_action_errors" not in clmc_service_response, "Unexpected error was returned for handlers specification"
 
             # traverse through all alerts and check that they are created along with their respective handlers within Kapacitor
-            check_kapacitor_alerts(alerts, kapacitor_host, kapacitor_port, alerts_test_file)
+            check_kapacitor_alerts_created(alerts, kapacitor_host, kapacitor_port, alerts_test_file)
 
             # send the same spec again to check that error messages are returned (because of ID duplication)
             with open(alert_spec_abs_path) as alert_spec:
@@ -234,7 +234,7 @@ class TestAlertsConfigurationAPI(object):
             assert "triggers_action_errors" not in clmc_service_response, "Unexpected error was returned for handlers specification"
 
             # traverse through all alerts and check that they were not deleted by the PUT request
-            check_kapacitor_alerts(alerts, kapacitor_host, kapacitor_port, alerts_test_file)
+            check_kapacitor_alerts_created(alerts, kapacitor_host, kapacitor_port, alerts_test_file)
 
             # clean-up in the end of the test
             clean_kapacitor_alerts(alerts, kapacitor_host, kapacitor_port)
@@ -280,7 +280,7 @@ class TestAlertsConfigurationAPI(object):
             assert "triggers_action_errors" not in clmc_service_response, "Unexpected error was returned for handlers specification"
 
             # traverse through all alerts and check that they were created by the PUT request
-            check_kapacitor_alerts(alerts, kapacitor_host, kapacitor_port, alerts_test_file)
+            check_kapacitor_alerts_created(alerts, kapacitor_host, kapacitor_port, alerts_test_file)
 
             # clean-up in the end of the test
             clean_kapacitor_alerts(alerts, kapacitor_host, kapacitor_port)
@@ -322,9 +322,15 @@ class TestAlertsConfigurationAPI(object):
                 request = testing.DummyRequest()
                 request.matchdict["sfc_id"] = sfc
                 request.matchdict["sfc_instance_id"] = sfc_instance
-                response = AlertsConfigurationAPI(request).get_alerts()
+                response = AlertsConfigurationAPI(request).get_all_sfc_instance_alerts()
                 assert response == [], "Incorrect response when fetching registered alerts, expecting empty list"
 
+                # send a GET request for registered alerts, expecting an empty mapping
+                request = testing.DummyRequest()
+                request.matchdict["sfc_id"] = sfc
+                response = AlertsConfigurationAPI(request).get_all_sfc_alerts()
+                assert response == {}, "Incorrect response when fetching registered alerts, expecting empty mapping"
+
                 # send valid alert and resource spec to create the alerts to be fetched afterwards through a GET request
                 with open(resource_spec_abs_path) as resource_spec:
                     request = testing.DummyRequest()
@@ -332,12 +338,6 @@ class TestAlertsConfigurationAPI(object):
                     request.POST['resource-spec'] = FieldStorageMock(resources_test_file, resource_spec)
                     AlertsConfigurationAPI(request).post_alerts_specification()
 
-            # send a GET request for registered alerts, expecting the newly created alerts
-            request = testing.DummyRequest()
-            request.matchdict["sfc_id"] = sfc
-            request.matchdict["sfc_instance_id"] = sfc_instance
-            response = AlertsConfigurationAPI(request).get_alerts()
-
             # restructure the extracted alerts data to be comparable with the response of the clmc service
             expected_alerts = map(lambda alert_object: {
                 "policy": alert_object["policy"], "trigger": alert_object["trigger"],
@@ -349,12 +349,33 @@ class TestAlertsConfigurationAPI(object):
                 "topic_handlers_api_endpoint": "/kapacitor/v1/alerts/topics/{0}/handlers".format(alert_object["topic"])
             }, alerts)
             expected_alerts = sorted(expected_alerts, key=lambda x: x["trigger"])
+
+            # send a GET request for registered alerts, expecting the newly created alerts
+            request = testing.DummyRequest()
+            request.matchdict["sfc_id"] = sfc
+            request.matchdict["sfc_instance_id"] = sfc_instance
+            response = AlertsConfigurationAPI(request).get_all_sfc_instance_alerts()
+            received_sfc_instance_alerts = response  # the response is a list of alert objects configured for this SFC instance
+
+            # sort the handlers of each alert in the response to ensure comparison order is correct
+            for alert in received_sfc_instance_alerts:
+                alert["handlers"] = sorted(alert["handlers"])
+
+            # compare the actual response with the expected response
+            assert sorted(received_sfc_instance_alerts, key=lambda x: x["trigger"]) == expected_alerts, "Incorrect result returned from a GET alerts request"
+
+            # send a GET request for registered alerts, expecting a mapping to the newly created alerts
+            request = testing.DummyRequest()
+            request.matchdict["sfc_id"] = sfc
+            response = AlertsConfigurationAPI(request).get_all_sfc_alerts()
+            received_sfc_alerts = response[sfc_instance]  # the response is a mapping between SFC instance IDs and list of alert objects configured for the SFC instance
+
             # sort the handlers of each alert in the response to ensure comparison order is correct
-            for alert in response:
+            for alert in received_sfc_alerts:
                 alert["handlers"] = sorted(alert["handlers"])
 
             # compare the actual response with the expected response
-            assert sorted(response, key=lambda x: x["trigger"]) == expected_alerts, "Incorrect result returned from a GET alerts request"
+            assert sorted(received_sfc_alerts, key=lambda x: x["trigger"]) == expected_alerts, "Incorrect result returned from a GET alerts request"
 
             clean_kapacitor_alerts(alerts, kapacitor_host, kapacitor_port)
 
@@ -398,7 +419,7 @@ class TestAlertsConfigurationAPI(object):
 
             # ensure that these resource (tasks, topics, handlers) exist in Kapacitor
             # traverse through all alerts and check that everything is created
-            check_kapacitor_alerts(alerts, kapacitor_host, kapacitor_port, alerts_test_file)
+            check_kapacitor_alerts_created(alerts, kapacitor_host, kapacitor_port, alerts_test_file)
 
             with open(alert_spec_abs_path) as alert_spec:
                 # now send the alert spec for deletion and check that everything is deleted in Kapacitor
@@ -427,21 +448,7 @@ class TestAlertsConfigurationAPI(object):
 
             # ensure that these resource (tasks, topics, handlers) do not exist in Kapacitor anymore
             # traverse through all alerts and check that everything is deleted from Kapacitor
-            for alert in alerts:
-                # check the alert task
-                alert_id = alert["task"]
-                kapacitor_response = get("http://{0}:{1}/kapacitor/v1/tasks/{2}".format(kapacitor_host, kapacitor_port, alert_id))
-                assert kapacitor_response.status_code == 404, "Alert with ID {0} was not deleted - test file {1}.".format(alert_id, alerts_test_file)
-
-                # check the alert topic
-                topic_id = alert["topic"]
-                kapacitor_response = get("http://{0}:{1}/kapacitor/v1/alerts/topics/{2}".format(kapacitor_host, kapacitor_port, topic_id))
-                assert kapacitor_response.status_code == 404, "Topic with ID {0} was not deleted - test file {1}".format(topic_id, alerts_test_file)
-
-                # check handlers
-                for handler_id in alert["handlers"]:
-                    kapacitor_response = get("http://{0}:{1}/kapacitor/v1/alerts/topics/{2}/handlers/{3}".format(kapacitor_host, kapacitor_port, topic_id, handler_id))
-                    assert kapacitor_response.status_code == 404, "Handler with ID {0} for topic with ID {1} was not deleted - test file {2}".format(handler_id, topic_id, alerts_test_file)
+            check_kapacitor_alerts_deleted(alerts, kapacitor_host, kapacitor_port, alerts_test_file)
 
             # now send a second delete request to ensure that nothing else is deleted
             with open(alert_spec_abs_path) as alert_spec:
@@ -451,8 +458,92 @@ class TestAlertsConfigurationAPI(object):
 
             assert response == {"deleted_alerts": [], "deleted_handlers": []}, "Incorrect response after a second delete"
 
+    def test_alerts_api_delete_all(self, app_config):
+        """
+        Tests the DELETE API endpoint of the alerts configuration API responsible for deleting every alert registered for a specific service function chain.
+
+        Test steps are:
+            * Traverse all valid TOSCA Alerts Specifications and TOSCA Resource Specifications in the
+                src/service/clmcservice/resources/tosca/test-data/clmc-validator/valid and src/service/clmcservice/resources/tosca/test-data/tosca-parser/valid folders
+            * Send a valid TOSCA Alert Specification to the view responsible for configuring Kapacitor and creating alerts
+            * Send a DELETE request to the view responsible for deleting all the created alerts
+            * Check that all Kapacitor resources (task, topic, handler) have been deleted
+
+        :param app_config: fixture for setUp/tearDown of the web service registry
+        """
+
+        kapacitor_host = self.config.registry.settings["kapacitor_host"]
+        kapacitor_port = self.config.registry.settings["kapacitor_port"]
+        sfemc_fqdn = self.config.registry.settings["sfemc_fqdn"]
+        sfemc_port = self.config.registry.settings["sfemc_port"]
+
+        # test all of the test files provided by the path_generator_testfiles function, ignoring the last result, which is invalid resource spec. files
+        for alert_spec_file_paths, valid_resource_spec_file_paths, _ in path_generator_testfiles():
+            alert_spec_abs_path, alerts_test_file = alert_spec_file_paths  # absolute path and name of the alerts spec. file
+            resource_spec_abs_path, resources_test_file = valid_resource_spec_file_paths  # absolute path and name of a valid resource spec. file
+
+            with open(alert_spec_abs_path) as alert_spec:
+
+                # extract the alert specification data, including the SFC and SFC instance IDs
+                sfc_id, sfc_instance_id, alerts = extract_alert_configuration_data(alert_spec, sfemc_fqdn, sfemc_port)
+                alert_spec.seek(0)  # reset the read pointer to the beginning again (the extraction in the previous step had to read the full file)
+
+                # send valid alert and resource spec to create the alerts to be deleted afterwards
+                with open(resource_spec_abs_path) as resource_spec:
+                    request = testing.DummyRequest()
+                    request.POST['alert-spec'] = FieldStorageMock(alerts_test_file, alert_spec)  # a simple mock class is used to mimic the FieldStorage class
+                    request.POST['resource-spec'] = FieldStorageMock(resources_test_file, resource_spec)
+                    AlertsConfigurationAPI(request).post_alerts_specification()
+
+            # ensure that these resources (tasks, topics, handlers) exist in Kapacitor
+            # traverse through all alerts and check that everything is created
+            check_kapacitor_alerts_created(alerts, kapacitor_host, kapacitor_port, alerts_test_file)
+
+            # send a DELETE request for all registered alerts for this specific SFC instance
+            request = testing.DummyRequest()
+            request.matchdict["sfc_id"] = sfc_id
+            request.matchdict["sfc_instance_id"] = sfc_instance_id
+            response = AlertsConfigurationAPI(request).delete_all_sfc_instance_alerts()
+            assert response == {"deleted_alerts_count": len(alerts)}, "Incorrect response returned when deleting all alerts for a specific SFC instance"
+
+            # ensure that these resources (tasks, topics, handlers) do not exist in Kapacitor anymore
+            # traverse through all alerts and check that everything is deleted from Kapacitor
+            check_kapacitor_alerts_deleted(alerts, kapacitor_host, kapacitor_port, alerts_test_file)
+
+            # create the alerts again
+            with open(alert_spec_abs_path) as alert_spec:
+                with open(resource_spec_abs_path) as resource_spec:
+                    request = testing.DummyRequest()
+                    request.POST['alert-spec'] = FieldStorageMock(alerts_test_file, alert_spec)  # a simple mock class is used to mimic the FieldStorage class
+                    request.POST['resource-spec'] = FieldStorageMock(resources_test_file, resource_spec)
+                    AlertsConfigurationAPI(request).put_alerts_specification()  # put or post, should not matter
+            check_kapacitor_alerts_created(alerts, kapacitor_host, kapacitor_port, alerts_test_file)  # ensure alerts were created before deleting them
+
+            # send a DELETE request for all registered alerts for this specific SFC
+            request = testing.DummyRequest()
+            request.matchdict["sfc_id"] = sfc_id
+            response = AlertsConfigurationAPI(request).delete_all_sfc_alerts()
+            assert response == {sfc_instance_id: {"deleted_alerts_count": len(alerts)}}, "Incorrect response returned when deleting all alerts for a specific SFC"
+            check_kapacitor_alerts_deleted(alerts, kapacitor_host, kapacitor_port, alerts_test_file)  # ensure the alerts were actually deleted
+
+            # now send a second delete request to ensure that nothing else is deleted
+            request = testing.DummyRequest()
+            request.matchdict["sfc_id"] = sfc_id
+            request.matchdict["sfc_instance_id"] = sfc_instance_id
+            response = AlertsConfigurationAPI(request).delete_all_sfc_instance_alerts()
+            assert response == {"deleted_alerts_count": 0}, "Incorrect response returned when deleting all alerts for a specific SFC instance"
+
+            # also send a delete request for all SFC instances, expecting empty mapping (no alerts found for any SFC instances)
+            request = testing.DummyRequest()
+            request.matchdict["sfc_id"] = sfc_id
+            response = AlertsConfigurationAPI(request).delete_all_sfc_alerts()
+            assert response == {}, "Incorrect response returned when deleting all alerts for a specific SFC"
 
+####
 # The implementation below is for utility methods and classes used in the tests' implementation.
+####
+
+
 class FieldStorageMock(object):
 
     def __init__(self, filename, file):
@@ -568,7 +659,7 @@ def get_alert_type(event_type, alert_period):
         return events[event_type]
 
 
-def check_kapacitor_alerts(alerts, kapacitor_host, kapacitor_port, alerts_test_file):
+def check_kapacitor_alerts_created(alerts, kapacitor_host, kapacitor_port, alerts_test_file):
     """
     Check that all Kapactior resources are created with the expected values.
 
@@ -610,6 +701,33 @@ def check_kapacitor_alerts(alerts, kapacitor_host, kapacitor_port, alerts_test_f
             assert kapacitor_response_json["options"]["url"] == handler_url, "Incorrect url of handler {0} in the Kapacitor response - test file {1}".format(handler_id, alerts_test_file)
 
 
+def check_kapacitor_alerts_deleted(alerts, kapacitor_host, kapacitor_port, alerts_test_file):
+    """
+    Check that all Kapacitor resources do not exist.
+
+    :param alerts: the list of alert objects to test
+    :param kapacitor_host: hostname of kapacitor
+    :param kapacitor_port: port number of kapacitor
+    :param alerts_test_file: name of the current test file
+    """
+
+    for alert in alerts:
+        # check the alert task
+        alert_id = alert["task"]
+        kapacitor_response = get("http://{0}:{1}/kapacitor/v1/tasks/{2}".format(kapacitor_host, kapacitor_port, alert_id))
+        assert kapacitor_response.status_code == 404, "Alert with ID {0} was not deleted - test file {1}.".format(alert_id, alerts_test_file)
+
+        # check the alert topic
+        topic_id = alert["topic"]
+        kapacitor_response = get("http://{0}:{1}/kapacitor/v1/alerts/topics/{2}".format(kapacitor_host, kapacitor_port, topic_id))
+        assert kapacitor_response.status_code == 404, "Topic with ID {0} was not deleted - test file {1}".format(topic_id, alerts_test_file)
+
+        # check handlers
+        for handler_id in alert["handlers"]:
+            kapacitor_response = get("http://{0}:{1}/kapacitor/v1/alerts/topics/{2}/handlers/{3}".format(kapacitor_host, kapacitor_port, topic_id, handler_id))
+            assert kapacitor_response.status_code == 404, "Handler with ID {0} for topic with ID {1} was not deleted - test file {2}".format(handler_id, topic_id, alerts_test_file)
+
+
 def clean_kapacitor_alerts(alerts, kapacitor_host, kapacitor_port):
     """
     A utility function to clean up Kapacitor from the configured alerts, topics and handlers.
diff --git a/src/service/clmcservice/alertsapi/views.py b/src/service/clmcservice/alertsapi/views.py
index 8b39d09a507ce2f9dc5e4277d8d0faaa96572cfb..f2bb1e609e44d4094d96d558f6c0a8a3c6a05b19 100644
--- a/src/service/clmcservice/alertsapi/views.py
+++ b/src/service/clmcservice/alertsapi/views.py
@@ -25,6 +25,7 @@
 # Python standard libs
 import logging
 from hashlib import sha256
+from collections import defaultdict
 
 # PIP installed libs
 from pyramid.httpexceptions import HTTPBadRequest
@@ -51,6 +52,10 @@ class AlertsConfigurationAPI(object):
 
     DUAL_VERSION_TEMPLATES = {"threshold"}  # this set defines all template types that are written in two versions (stream and batch)
 
+    KAPACITOR_TASK_API_PREFIX = "/kapacitor/v1/tasks"
+
+    KAPACITOR_TOPIC_API_PREFIX = "/kapacitor/v1/alerts/topics"
+
     def __init__(self, request):
         """
         Initialises the instance of the view with the request argument.
@@ -83,61 +88,205 @@ class AlertsConfigurationAPI(object):
 
         return {
             "task_identifier": alert_id,
-            "task_api_endpoint": "/kapacitor/v1/tasks/{0}".format(alert_id),
+            "task_api_endpoint": "{0}/{1}".format(self.KAPACITOR_TASK_API_PREFIX, alert_id),
             "topic_identifier": topic_id,
-            "topic_api_endpoint": "/kapacitor/v1/alerts/topics/{0}".format(topic_id),
-            "topic_handlers_api_endpoint": "/kapacitor/v1/alerts/topics/{0}/handlers".format(topic_id)
+            "topic_api_endpoint": "{0}/{1}".format(self.KAPACITOR_TOPIC_API_PREFIX, topic_id),
+            "topic_handlers_api_endpoint": "{0}/{1}/handlers".format(self.KAPACITOR_TOPIC_API_PREFIX, topic_id)
         }
 
+    @view_config(route_name='alerts_configuration_aggregate', request_method='GET')
+    def get_all_sfc_alerts(self):
+        """
+        The view for retrieving all alerts and Kapacitor resources registered for instances of a specific service function chain.
+
+        :return: a mapping between SFC instance IDs and list of alert objects,
+            each object representing an alert and containing the policy ID, trigger ID, task ID, topic ID and Kapacitor endpoints for these
+        """
+
+        # fetch the URL parameter
+        sfc_id = self.request.matchdict["sfc_id"]
+
+        return self._get_all_alerts(sfc_id)
+
     @view_config(route_name='alerts_configuration_instance', request_method='GET')
-    def get_alerts(self):
+    def get_all_sfc_instance_alerts(self):
         """
         The view for retrieving all alerts and Kapacitor resources registered for a specific service function chain instance.
 
-        :return: a list of objects, each object representing an alert and containing the policy ID, trigger ID, task ID, topic ID and kapacitor endpoints for these
+        :return: a list of alert objects, each object representing an alert and containing the policy ID, trigger ID, task ID, topic ID and Kapacitor endpoints for these
+        """
+
+        # fetch the URL parameters
+        sfc_id, sfc_instance_id = self.request.matchdict["sfc_id"], self.request.matchdict["sfc_instance_id"]
+
+        # return the list of alert objects for the SFC instance parameter
+        return self._get_all_alerts(sfc_id, sfc_instance_id).get(sfc_instance_id, [])  # defaults to empty list if no alerts are found for this SFC instance
+
+    def _get_all_alerts(self, sfc_id, sfc_instance_id=None):
+        """
+        Fetch all alerts configured for a specific SFC or SFC instance.
+
+        :param sfc_id: the SFC identifier
+        :param sfc_instance_id: the SFC instance identifier (defaults to None), if set to None, return the alerts for all instances of this SFC
+
+        :return: metadata of the CLMC alerts that were configured for this SFC (a mapping between SFC instance IDs and list of alert objects)
         """
 
         kapacitor_host, kapacitor_port = self.request.registry.settings['kapacitor_host'], self.request.registry.settings['kapacitor_port']
         sfemc_fqdn, sfemc_port = self.request.registry.settings['sfemc_fqdn'], self.request.registry.settings['sfemc_port']
         fqdn_prefix = "http://{0}:{1}/sfemc/event".format(sfemc_fqdn, sfemc_port)
 
+        # fetch all alerts that are configured for this SFC
+        tasks_to_return = defaultdict(list)
+
+        # traverse every registered task and check if it is configured for this SFC instance
+        for task in self._fetch_alert_tasks_for_sfc(sfc_id, sfc_instance_id):
+
+            # get the configured variables of this alert
+            task_config = task["vars"]
+
+            # get the TOSCA spec. configuration values
+            task_id = task["id"]
+            topic_id = task_id
+            policy_id = task_config["policy"]["value"]
+            trigger_id = task_config["eventID"]["value"]
+
+            kapacitor_handlers_relative_url = "{0}/{1}/handlers".format(self.KAPACITOR_TOPIC_API_PREFIX, topic_id)
+            kapacitor_handlers_full_url = "http://{0}:{1}{2}".format(kapacitor_host, kapacitor_port, kapacitor_handlers_relative_url)
+            handlers = [handler_obj["options"]["url"] for handler_obj in get(kapacitor_handlers_full_url).json()["handlers"]]
+            handlers = list(map(lambda handler: SFEMC if handler.startswith(fqdn_prefix) else handler, handlers))
+
+            alert_object = {"policy": policy_id, "trigger": trigger_id, "handlers": handlers,
+                            "task_identifier": task_id, "topic_identifier": topic_id,
+                            "task_api_endpoint": "{0}/{1}".format(self.KAPACITOR_TASK_API_PREFIX, task_id),
+                            "topic_api_endpoint": "{0}/{1}".format(self.KAPACITOR_TOPIC_API_PREFIX, topic_id),
+                            "topic_handlers_api_endpoint": kapacitor_handlers_relative_url}
+
+            # add it to the list of alerts for this SFC instance
+            tasks_to_return[task_config["sfci"]["value"]].append(alert_object)
+
+        return tasks_to_return
+
+    @view_config(route_name='alerts_configuration_aggregate', request_method='DELETE')
+    def delete_all_sfc_alerts(self):
+        """
+        The view function for deleting all alerts and Kapacitor resources registered for all instances of a specific service function chain.
+
+        :return: the number of deleted alerts for each SFC instance.
+        """
+
+        # fetch the URL parameter
+        sfc_id = self.request.matchdict["sfc_id"]
+
+        return self._delete_all_alerts(sfc_id)
+
+    @view_config(route_name='alerts_configuration_instance', request_method='DELETE')
+    def delete_all_sfc_instance_alerts(self):
+        """
+        The view function for deleting all alerts and Kapacitor resources registered for a specific service function chain instance.
+
+        :return: the number of deleted alerts for this specific SFC instance.
+        """
+
         # fetch the URL parameters
         sfc_id, sfc_instance_id = self.request.matchdict["sfc_id"], self.request.matchdict["sfc_instance_id"]
 
-        # get all tasks from kapacitor
-        kapacitor_tasks_url = "http://{0}:{1}/kapacitor/v1/tasks".format(kapacitor_host, kapacitor_port)
-        all_tasks = get(kapacitor_tasks_url).json()["tasks"]
+        # if no SFC instance alerts were deleted, manually set the count to 0
+        return self._delete_all_alerts(sfc_id, sfc_instance_id).get(sfc_instance_id, {"deleted_alerts_count": 0})  # defaults to 0 deleted alerts if SFC instance is not found
+
+    def _delete_all_alerts(self, sfc_id, sfc_instance_id=None):
+        """
+        Delete all alerts configured for a specific SFC or SFC instance.
+
+        :param sfc_id: the SFC identifier
+        :param sfc_instance_id: the SFC instance identifier (defaults to None), if set to None, delete the alerts for all instances of this SFC
+
+        :return: a mapping between SFC instance IDs and a dictionary showing the count of deleted alerts
+        """
+
+        kapacitor_host, kapacitor_port = self.request.registry.settings['kapacitor_host'], self.request.registry.settings['kapacitor_port']
 
-        # fetch all alerts that are configured for this SFC instance
-        sfc_instance_tasks = []
+        deleted_count_dict = defaultdict(dict)
 
         # traverse every registered task and check if it is configured for this SFC instance
-        for task in all_tasks:
+        for task in self._fetch_alert_tasks_for_sfc(sfc_id, sfc_instance_id):
+
+            # delete the Kapacitor task
+            task_id = task["id"]
+            kapacitor_task_relative_url = "{0}/{1}".format(self.KAPACITOR_TASK_API_PREFIX, task_id)
+            kapacitor_task_full_url = "http://{0}:{1}{2}".format(kapacitor_host, kapacitor_port, kapacitor_task_relative_url)
+            log.info("Sending DELETE request to Kapacitor for URL - {0}".format(kapacitor_task_full_url))
+            response = delete(kapacitor_task_full_url)
+            log.info("Received status code {0} from Kapacitor for URL - {1}".format(response.status_code, kapacitor_task_full_url))
+
+            # log the response body if the status code was different than 204
+            assert response.status_code == 204, "Received response status code different than 204 when deleting an alert task - {0}.".format(response.text)
+
+            # increment the count of deleted alerts
+            delete_dict = deleted_count_dict[task["vars"]["sfci"]["value"]]
+            try:
+                delete_dict["deleted_alerts_count"] += 1
+            except KeyError:
+                delete_dict["deleted_alerts_count"] = 1
+
+            # get the Kapacitor topic and the subscribed handlers
+            topic_id = task_id
+            kapacitor_topic_relative_url = "{0}/{1}".format(self.KAPACITOR_TOPIC_API_PREFIX, topic_id)
+            kapacitor_topic_full_url = "http://{0}:{1}{2}".format(kapacitor_host, kapacitor_port, kapacitor_topic_relative_url)
+            kapacitor_topic_handlers_full_url = "{0}{1}".format(kapacitor_topic_full_url, "/handlers")
+
+            log.info("Sending GET request to Kapacitor for URL - {0}".format(kapacitor_topic_handlers_full_url))
+            response = get(kapacitor_topic_handlers_full_url)
+            log.info("Received status code {0} from Kapacitor for URL - {1}".format(response.status_code, kapacitor_topic_handlers_full_url))
+
+            # traverse each handler and delete it
+            for handler in response.json()["handlers"]:
+                handler_id = handler["id"]
+                kapacitor_topic_handlers_instance_full_url = "{0}/{1}".format(kapacitor_topic_handlers_full_url, handler_id)
+                log.info("Sending DELETE request to Kapacitor for URL - {0}".format(kapacitor_topic_handlers_instance_full_url))
+                response = delete(kapacitor_topic_handlers_instance_full_url)
+                log.info("Received status code {0} from Kapacitor for URL - {1}".format(response.status_code, kapacitor_topic_handlers_instance_full_url))
+                assert response.status_code == 204, "Received response status code different than 204 when deleting an alert topic handler - {0}.".format(response.text)
+
+            # delete the Kapacitor topic
+            log.info("Sending DELETE request to Kapacitor for URL - {0}".format(kapacitor_topic_full_url))
+            response = delete(kapacitor_topic_full_url)
+            log.info("Received status code {0} from Kapacitor for URL - {1}".format(response.status_code, kapacitor_topic_full_url))
+            assert response.status_code == 204, "Received response status code different than 204 when deleting an alert topic - {0}.".format(response.text)
+
+        return deleted_count_dict
+
+    def _fetch_alert_tasks_for_sfc(self, sfc_id, sfc_instance_id=None):
+        """
+        Utility method to retrieve the alert tasks configured for a specific service function chain and service function chain instance.
 
-            # get the configured variables of this alert
-            task_config = task["vars"]
+        :param sfc_id: the SFC identifier
+        :param sfc_instance_id: the SFC instance identifier (defaults to None), if set to None, return alerts for all instances of this SFC
+
+        :return: the alerts configuration for the SFC identified with the function parameters, yielding each of these using an iterator pattern
+        """
 
-            # if configured for this SFC instance
-            if task_config["sfc"]["value"] == sfc_id and task_config["sfci"]["value"] == sfc_instance_id:
+        kapacitor_host, kapacitor_port = self.request.registry.settings['kapacitor_host'], self.request.registry.settings['kapacitor_port']
 
-                task_id = task["id"]
-                topic_id = task_id
-                policy_id = task_config["policy"]["value"]
-                trigger_id = task_config["eventID"]["value"]
+        # get all tasks from Kapacitor
+        kapacitor_tasks_url = "http://{0}:{1}{2}".format(kapacitor_host, kapacitor_port, self.KAPACITOR_TASK_API_PREFIX)
+        log.info("Sending GET request to Kapacitor for URL - {0}".format(kapacitor_tasks_url))
+        response = get(kapacitor_tasks_url)
+        log.info("Received status code {0} from Kapacitor for URL - {1}".format(response.status_code, kapacitor_tasks_url))
 
-                kapacitor_handlers_relative_url = "/kapacitor/v1/alerts/topics/{0}/handlers".format(topic_id)
-                kapacitor_handlers_full_url = "http://{0}:{1}{2}".format(kapacitor_host, kapacitor_port, kapacitor_handlers_relative_url)
-                handlers = [handler_obj["options"]["url"] for handler_obj in get(kapacitor_handlers_full_url).json()["handlers"]]
-                handlers = list(map(lambda handler: SFEMC if handler.startswith(fqdn_prefix) else handler, handlers))
+        all_tasks = response.json()["tasks"]
 
-                # add it to the list of alerts for this SFC instance
-                sfc_instance_tasks.append({"policy": policy_id, "trigger": trigger_id, "handlers": handlers,
-                                           "task_identifier": task_id, "topic_identifier": topic_id,
-                                           "task_api_endpoint": "/kapacitor/v1/tasks/{0}".format(task_id),
-                                           "topic_api_endpoint": "/kapacitor/v1/alerts/topics/{0}".format(topic_id),
-                                           "topic_handlers_api_endpoint": kapacitor_handlers_relative_url})
+        # traverse every registered task and check if it is configured for this SFC instance
+        for task in all_tasks:
+
+            # get the configured variables of this alert
+            task_config = task["vars"]
 
-        return sfc_instance_tasks
+            # if the SFC parameter matches
+            if task_config["sfc"]["value"] == sfc_id:
+                # if the SFC instance parameter matches or it was set to None
+                if sfc_instance_id is None or task_config["sfci"]["value"] == sfc_instance_id:
+                    yield task
 
     @view_config(route_name='alerts_configuration', request_method='DELETE')
     def delete_alerts_specification(self):
@@ -177,13 +326,13 @@ class AlertsConfigurationAPI(object):
                 task_id = topic_id
 
                 # delete alert task
-                kapacitor_api_task_url = "http://{0}:{1}/kapacitor/v1/tasks/{2}".format(kapacitor_host, kapacitor_port, task_id)
+                kapacitor_api_task_url = "http://{0}:{1}{2}/{3}".format(kapacitor_host, kapacitor_port, self.KAPACITOR_TASK_API_PREFIX, task_id)
                 if get(kapacitor_api_task_url).status_code == 200:
                     delete(kapacitor_api_task_url)
                     alerts.append({"policy": policy_id, "trigger": event_id})
 
                 # get all alert handlers
-                kapacitor_api_topic_handlers_url = "http://{0}:{1}/kapacitor/v1/alerts/topics/{2}/handlers".format(kapacitor_host, kapacitor_port, topic_id)
+                kapacitor_api_topic_handlers_url = "http://{0}:{1}{2}/{3}/handlers".format(kapacitor_host, kapacitor_port, self.KAPACITOR_TOPIC_API_PREFIX, topic_id)
                 http_response = get(kapacitor_api_topic_handlers_url)
                 if http_response.status_code != 200:
                     continue  # if the topic doesn't exist continue with the other triggers
@@ -198,12 +347,12 @@ class AlertsConfigurationAPI(object):
 
                     handler_id = handler["id"]
 
-                    kapacitor_api_handler_url = "http://{0}:{1}/kapacitor/v1/alerts/topics/{2}/handlers/{3}".format(kapacitor_host, kapacitor_port, topic_id, handler_id)
+                    kapacitor_api_handler_url = "http://{0}:{1}{2}/{3}/handlers/{4}".format(kapacitor_host, kapacitor_port, self.KAPACITOR_TOPIC_API_PREFIX, topic_id, handler_id)
                     delete(kapacitor_api_handler_url)
                     handlers.append({"policy": policy_id, "trigger": event_id, "handler": original_http_handler_url})
 
                 # delete alert topic
-                kapacitor_api_topic_url = "http://{0}:{1}/kapacitor/v1/alerts/topics/{2}".format(kapacitor_host, kapacitor_port, topic_id)
+                kapacitor_api_topic_url = "http://{0}:{1}{2}/{3}".format(kapacitor_host, kapacitor_port, self.KAPACITOR_TOPIC_API_PREFIX, topic_id)
                 delete(kapacitor_api_topic_url)
 
         return {"deleted_alerts": alerts, "deleted_handlers": handlers}
@@ -340,7 +489,7 @@ class AlertsConfigurationAPI(object):
         :return: the list for tracking errors while interacting with Kapacitor tasks and the list for tracking errors while interacting with Kapacitor alert handlers
         """
 
-        kapacitor_api_tasks_url = "http://{0}:{1}/kapacitor/v1/tasks".format(kapacitor_host, kapacitor_port)
+        kapacitor_api_tasks_url = "http://{0}:{1}{2}".format(kapacitor_host, kapacitor_port, self.KAPACITOR_TASK_API_PREFIX)
 
         # two lists to keep track of any errors while interacting with the Kapacitor HTTP API
         alert_tasks_errors = []
@@ -460,7 +609,7 @@ class AlertsConfigurationAPI(object):
         :param patch_duplicates: (defaults to False) if set to True, any duplication errors will be handled by first deleting the existing resource and then creating it
         """
 
-        kapacitor_api_handlers_url = "http://{0}:{1}/kapacitor/v1/alerts/topics/{2}/handlers".format(kapacitor_host, kapacitor_port, topic_id)
+        kapacitor_api_handlers_url = "http://{0}:{1}{2}/{3}/handlers".format(kapacitor_host, kapacitor_port, self.KAPACITOR_TOPIC_API_PREFIX, topic_id)
 
         for http_handler_url in http_handlers:
 
diff --git a/src/service/clmcservice/managementapi/__init__.py b/src/service/clmcservice/managementapi/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/service/clmcservice/managementapi/tests.py b/src/service/clmcservice/managementapi/tests.py
new file mode 100644
index 0000000000000000000000000000000000000000..8bf928fa5c8e889415ee5725e3f937b3e05ab1f5
--- /dev/null
+++ b/src/service/clmcservice/managementapi/tests.py
@@ -0,0 +1,89 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          25-06-2019
+//      Created for Project :   FLAME
+"""
+
+
+# Python standard libs
+from unittest.mock import patch, Mock
+
+# PIP installed libs
+import pytest
+from pyramid import testing
+
+# CLMC-service imports
+from clmcservice.managementapi.views import DataManagementAPI
+
+
+class TestDataManagementAPI(object):
+    """
+    A pytest-implementation test for the Data Management API endpoints.
+    """
+
+    @pytest.fixture(autouse=True)
+    def print_fixture(self):
+        """
+        Fixture to adjust the printing format when running pytest with the "-s" flag - by default print messages mix up with pytest's output
+        """
+
+        print()
+
+    @pytest.fixture(autouse=True)
+    def app_config(self):
+        """
+        A fixture to implement setUp/tearDown functionality for all tests by initializing configuration structure for the web service
+        """
+
+        self.config = testing.setUp()
+        self.config.add_settings({"influx_host": "localhost", "influx_port": 8086, "clmc_service_port": 9080})
+
+        yield
+
+        testing.tearDown()
+
+    @patch('clmcservice.managementapi.views.delete')
+    @patch('clmcservice.managementapi.views.InfluxDBClient')
+    def test_delete_all_sfc_data(self, influx_client_mock, http_delete_mock):
+        """
+        Tests the functionality of the 'delete all SFC data' API endpoint.
+        """
+
+        app_settings = self.config.get_settings()
+
+        # mock the responses of the two API calls
+        mock_response1 = Mock()
+        mock_response1.status_code = 200
+        http_delete_mock.return_value = mock_response1
+
+        mock_response2 = Mock()
+        influx_client_mock.return_value = mock_response2
+
+        # send a request and check that the correct API calls are made
+        request = testing.DummyRequest()
+        sfc_id = "test-sfc"
+        request.matchdict["sfc_id"] = sfc_id
+
+        DataManagementAPI(request).delete_all_sfc_data()
+
+        influx_client_mock.assert_called_with(host=app_settings["influx_host"], port=app_settings["influx_port"])
+        http_delete_mock.assert_called_with("http://localhost:{0}/alerts/{1}".format(app_settings["clmc_service_port"], sfc_id))
+        mock_response2.drop_database.assert_called_with(sfc_id)
diff --git a/src/service/clmcservice/managementapi/views.py b/src/service/clmcservice/managementapi/views.py
new file mode 100644
index 0000000000000000000000000000000000000000..384b47a9d92d2f5d20b1f9329b7f395edcefb1c6
--- /dev/null
+++ b/src/service/clmcservice/managementapi/views.py
@@ -0,0 +1,82 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          25-06-2019
+//      Created for Project :   FLAME
+"""
+
+
+# Python standard libs
+import logging
+
+# PIP installed libs
+from influxdb import InfluxDBClient
+from pyramid.view import view_defaults, view_config
+from requests import delete
+
+# initialise logger
+log = logging.getLogger('service_logger')
+
+
+@view_defaults(renderer='json')
+class DataManagementAPI(object):
+    """
+    A class-based view for managing SFC data - time-series measurements and alerts.
+    """
+
+    def __init__(self, request):
+        """
+        Initialises the instance of the view with the request argument.
+
+        :param request: client's call request
+        """
+
+        self.request = request
+
+    @view_config(route_name='data_management_aggregate', request_method='DELETE')
+    def delete_all_sfc_data(self):
+        """
+        The view for deleting all Kapacitor and InfluxDB resources registered for a specific service function chain.
+
+        :return: a JSON message to indicate whether deletion was successful
+        """
+
+        influx_host, influx_port = self.request.registry.settings['influx_host'], self.request.registry.settings['influx_port']
+        clmc_service_port = self.request.registry.settings['clmc_service_port']
+
+        # establish connection with InfluxDB
+        client = InfluxDBClient(host=influx_host, port=influx_port)
+
+        # fetch the URL parameters
+        sfc_id = self.request.matchdict["sfc_id"]
+
+        # delete alerts data
+        alerts_delete_url = "http://localhost:{0}/alerts/{1}".format(clmc_service_port, sfc_id)
+        log.info("Sending DELETE request to CLMC service for URL - {0}".format(alerts_delete_url))
+        alerts_response = delete(alerts_delete_url)
+        log.info("Received status code {0} from CLMC service for URL - {1}".format(alerts_response.status_code, alerts_delete_url))
+
+        assert alerts_response.status_code == 200, "Alerts deletion returned status code different than 200 - {0}".format(alerts_response.text)
+
+        # delete database and time-series data
+        log.info("Deleting database {0}".format(sfc_id))
+        client.drop_database(sfc_id)
+
+        return {"msg": "Time-series data and alerts have been successfully deleted for SFC {0}.".format(sfc_id)}
diff --git a/src/service/development.ini b/src/service/development.ini
index bfae568a67367e7e34cabfccd0236a62f0ad40d0..1b02ebb9d1b9e200a6b05756c36be16a06103ab7 100644
--- a/src/service/development.ini
+++ b/src/service/development.ini
@@ -1,8 +1,3 @@
-###
-# app configuration
-# https://docs.pylonsproject.org/projects/pyramid/en/latest/narr/environment.html
-###
-
 [app:main]
 use = egg:clmcservice
 
@@ -12,9 +7,10 @@ pyramid.debug_notfound = false
 pyramid.debug_routematch = false
 pyramid.default_locale_name = en
 pyramid.includes = pyramid_debugtoolbar pyramid_exclog
+# sets the exclog.ignore property to nothing so that all exceptions are logged (including HTTP Not Found exceptions)
 exclog.ignore =
 
-
+# file paths to the mappings between clusters and service routers (temporary solution)
 network_clusters_path = /opt/clmc/src/service/resources/GraphAPI/network_clusters.json
 network_ues_path = /opt/clmc/src/service/resources/GraphAPI/network_ues.json
 
@@ -24,7 +20,7 @@ network_bandwidth = 10000
 # PostgreSQL connection url
 sqlalchemy.url = postgresql://clmc:clmc_service@localhost:5432/whoamidb
 
-# Influx connection
+# InfluxDB connection
 influx_host = localhost
 influx_port = 8086
 
@@ -36,22 +32,14 @@ kapacitor_port = 9092
 neo4j_host = localhost
 neo4j_password = admin
 
-# By default, the toolbar only appears for clients from IP addresses
-# '127.0.0.1' and '::1'.
-# debugtoolbar.hosts = 127.0.0.1 ::1
+# CLMC service connection - port number is specified here so that the application can access this configuration, but also in the [server:main] config
+clmc_service_port = 9080
 
-###
-# wsgi server configuration
-###
 
 [server:main]
 use = egg:waitress#main
 listen = localhost:9080
 
-###
-# logging configuration
-# https://docs.pylonsproject.org/projects/pyramid/en/latest/narr/logging.html
-###
 
 [loggers]
 keys = root, exc_logger, service_logger, sqlalchemy.engine.base.Engine
diff --git a/src/service/production.ini b/src/service/production.ini
index eb5577e32d8261f0b05fa98109a1df42efa9e9b3..9cd9e98b553f85f9bbbaafc0af0c092ee9db8fe0 100644
--- a/src/service/production.ini
+++ b/src/service/production.ini
@@ -1,8 +1,3 @@
-###
-# app configuration
-# https://docs.pylonsproject.org/projects/pyramid/en/latest/narr/environment.html
-###
-
 [app:main]
 use = egg:clmcservice
 
@@ -12,9 +7,10 @@ pyramid.debug_notfound = false
 pyramid.debug_routematch = false
 pyramid.default_locale_name = en
 pyramid.includes = pyramid_exclog
+# sets the exclog.ignore property to nothing so that all exceptions are logged (including HTTP Not Found exceptions)
 exclog.ignore =
 
-
+# file paths to the mappings between clusters and service routers (temporary solution)
 network_clusters_path = /opt/clmc/src/service/resources/GraphAPI/network_clusters.json
 network_ues_path = /opt/clmc/src/service/resources/GraphAPI/network_ues.json
 
@@ -24,7 +20,7 @@ network_bandwidth = 10000
 # PostgreSQL connection url
 sqlalchemy.url = postgresql://clmc:clmc_service@localhost:5432/whoamidb
 
-# Influx connection
+# InfluxDB connection
 influx_host = localhost
 influx_port = 8086
 
@@ -36,18 +32,14 @@ kapacitor_port = 9092
 neo4j_host = localhost
 neo4j_password = admin
 
-###
-# wsgi server configuration
-###
+# CLMC service connection - port number is specified here so that the application can access this configuration, but also in the [server:main] config
+clmc_service_port = 9080
+
 
 [server:main]
 use = egg:waitress#main
 listen = *:9080
 
-###
-# logging configuration
-# https://docs.pylonsproject.org/projects/pyramid/en/latest/narr/logging.html
-###
 
 [loggers]
 keys = root, exc_logger, service_logger, sqlalchemy.engine.base.Engine
diff --git a/src/service/resources/GraphAPI/network_config.json b/src/service/resources/GraphAPI/network_config.json
deleted file mode 100644
index 4af2f1b0b6adce7f9660c3b16a84eb6a60d7bd05..0000000000000000000000000000000000000000
--- a/src/service/resources/GraphAPI/network_config.json
+++ /dev/null
@@ -1,89 +0,0 @@
-{
-  "bandwidth": 104857600,
-  "links": [
-    {
-      "source": "20-sr1-cluster1-cluster",
-      "target": "22-sr1-cluster1-cluster",
-      "min_response_time": 3.427,
-      "avg_response_time": 3.737,
-      "max_response_time": 4.281
-    },
-    {
-      "source": "20-sr1-cluster1-cluster",
-      "target": "23-sr1-cluster1-cluster",
-      "min_response_time": 4.302,
-      "avg_response_time": 4.692,
-      "max_response_time": 5.463
-    },
-    {
-      "source": "20-sr1-cluster1-cluster",
-      "target": "24-sr1-cluster1-cluster",
-      "min_response_time": 3.597,
-      "avg_response_time": 3.974,
-      "max_response_time": 4.432
-    },
-    {
-      "source": "22-sr1-cluster1-cluster",
-      "target": "20-sr1-cluster1-cluster",
-      "min_response_time": 3.338,
-      "avg_response_time": 4.115,
-      "max_response_time": 4.735
-    },
-        {
-      "source": "22-sr1-cluster1-cluster",
-      "target": "23-sr1-cluster1-cluster",
-      "min_response_time": 3.780,
-      "avg_response_time": 4.658,
-      "max_response_time": 10.347
-    },
-    {
-      "source": "22-sr1-cluster1-cluster",
-      "target": "24-sr1-cluster1-cluster",
-      "min_response_time": 3.436,
-      "avg_response_time": 5.253,
-      "max_response_time": 13.881
-    },
-    {
-      "source": "23-sr1-cluster1-cluster",
-      "target": "20-sr1-cluster1-cluster",
-      "min_response_time": 3.311,
-      "avg_response_time": 3.379,
-      "max_response_time": 4.031
-    },
-    {
-      "source": "23-sr1-cluster1-cluster",
-      "target": "22-sr1-cluster1-cluster",
-      "min_response_time": 3.230,
-      "avg_response_time": 5.950,
-      "max_response_time": 15.286
-    },
-        {
-      "source": "23-sr1-cluster1-cluster",
-      "target": "24-sr1-cluster1-cluster",
-      "min_response_time": 3.117,
-      "avg_response_time": 4.934,
-      "max_response_time": 12.666
-    },
-    {
-      "source": "24-sr1-cluster1-cluster",
-      "target": "20-sr1-cluster1-cluster",
-      "min_response_time": 3.494,
-      "avg_response_time": 4.326,
-      "max_response_time": 4.916
-    },
-    {
-      "source": "24-sr1-cluster1-cluster",
-      "target": "22-sr1-cluster1-cluster",
-      "min_response_time": 3.273,
-      "avg_response_time": 5.052,
-      "max_response_time": 11.490
-    },
-    {
-      "source": "24-sr1-cluster1-cluster",
-      "target": "23-sr1-cluster1-cluster",
-      "min_response_time": 2.807,
-      "avg_response_time": 4.652,
-      "max_response_time": 12.504
-    }
-  ]
-}
\ No newline at end of file
diff --git a/src/test/VERSION b/src/test/VERSION
index 1f8197ce8a3b7dcfa3937a90ffee1db776aaace7..59124ba3ed3e948992d5cf837750ed347b4f46db 100644
--- a/src/test/VERSION
+++ b/src/test/VERSION
@@ -1 +1 @@
-__version__ = "2.2.0"
\ No newline at end of file
+__version__ = "2.3.0"
\ No newline at end of file
diff --git a/src/test/clmctest/alerts/test_alerts.py b/src/test/clmctest/alerts/test_alerts.py
index fcffc57631d3db40e90e9c7c1ca65906b458cd0e..ba7a96f1aab913862ab9391f306aa4a0a1376756 100644
--- a/src/test/clmctest/alerts/test_alerts.py
+++ b/src/test/clmctest/alerts/test_alerts.py
@@ -23,6 +23,8 @@
 """
 import datetime
 from time import sleep, strptime
+
+from influxdb import InfluxDBClient
 from requests import post, get, delete, put
 from os import listdir
 from os.path import join, dirname
@@ -34,6 +36,8 @@ from clmctest.alerts.alert_handler_server import LOG_TEST_FOLDER_PATH
 NGINX_PORT = 80
 SFEMC = "flame_sfemc"
 
+sfc, sfc_instance = "MS_Template_1", "MS_Template_1_1"
+
 
 def is_valid_timestamp(str_timestamp):
     try:
@@ -109,6 +113,8 @@ class TestAlerts(object):
             if clmc_service_host is not None and nginx_host is not None:
                 break
 
+        assert clmc_service_host is not None and nginx_host is not None
+
         # create the alerts with a POST request
         print("Sending alerts specification to clmc service...")
         alerts_spec = join(dirname(__file__), "alerts_test_config.yaml")
@@ -119,28 +125,17 @@ class TestAlerts(object):
                 files = {'alert-spec': alerts, 'resource-spec': resources}
                 response = post("http://{0}/clmc-service/alerts".format(clmc_service_host), files=files)
 
-        assert response.status_code == 200
+        assert response.status_code == 200, response.text
         clmc_service_response = response.json()
         assert "triggers_specification_errors" not in clmc_service_response, "Unexpected error was returned for triggers specification"
         assert "triggers_action_errors" not in clmc_service_response, "Unexpected error was returned for handlers specification"
-
-        sfc, sfc_instance = "MS_Template_1", "MS_Template_1_1"
         assert (sfc, sfc_instance) == (clmc_service_response["service_function_chain_id"], clmc_service_response["service_function_chain_instance_id"])
         print("Alert spec sent successfully")
 
         # check that the alerts can be fetched with a GET request
         print("Validate that the alerts were registered and can be fetched with a GET request.")
-        response = get("http://{0}/clmc-service/alerts/{1}/{2}".format(clmc_service_host, sfc, sfc_instance))
-        assert response.status_code == 200
-        clmc_service_response = response.json()
-        clmc_service_response = sorted(clmc_service_response, key=lambda x: x["trigger"])  # sort by trigger so that the response can be compared to what's expected
 
-        # sort the handlers of returned alerts to ensure comparison order is correct
-        for alert in clmc_service_response:
-            alert["handlers"] = sorted(alert["handlers"])
-
-        # compare actual response with expected response
-        assert clmc_service_response == [
+        expected_alerts_list = [
             {"policy": "scale_nginx_policy", "trigger": "high_requests", "task_identifier": "46fb8800c8a5eeeb04b090d838d475df574a2e6d854b5d678fc981c096eb6c1b",
              "handlers": ["http://172.40.231.200:9999/"],
              "topic_identifier": "46fb8800c8a5eeeb04b090d838d475df574a2e6d854b5d678fc981c096eb6c1b",
@@ -165,7 +160,35 @@ class TestAlerts(object):
              "task_api_endpoint": "/kapacitor/v1/tasks/f7dab6fd53001c812d44533d3bbb6ef45f0d1d39b9441bc3c60402ebda85d320",
              "topic_api_endpoint": "/kapacitor/v1/alerts/topics/f7dab6fd53001c812d44533d3bbb6ef45f0d1d39b9441bc3c60402ebda85d320",
              "topic_handlers_api_endpoint": "/kapacitor/v1/alerts/topics/f7dab6fd53001c812d44533d3bbb6ef45f0d1d39b9441bc3c60402ebda85d320/handlers"}
-        ], "Incorrect response for GET alerts request"
+        ]
+
+        response = get("http://{0}/clmc-service/alerts/{1}/{2}".format(clmc_service_host, sfc, sfc_instance))
+        assert response.status_code == 200, response.text
+        clmc_service_response = response.json()
+        actual_alerts_list = clmc_service_response
+        actual_alerts_list = sorted(actual_alerts_list, key=lambda x: x["trigger"])  # sort by trigger so that the response can be compared to what's expected
+
+        # sort the handlers of returned alerts to ensure comparison order is correct
+        for alert in actual_alerts_list:
+            alert["handlers"] = sorted(alert["handlers"])
+
+        # compare actual response with expected response
+        assert actual_alerts_list == expected_alerts_list, "Incorrect response for GET alerts request"
+
+        response = get("http://{0}/clmc-service/alerts/{1}".format(clmc_service_host, sfc))
+        assert response.status_code == 200, response.text
+        clmc_service_response = response.json()
+        assert clmc_service_response.keys() == {sfc_instance}, "Expecting only one instance of this SFC"
+        actual_alerts_list = clmc_service_response[sfc_instance]
+        actual_alerts_list = sorted(actual_alerts_list, key=lambda x: x["trigger"])  # sort by trigger so that the response can be compared to what's expected
+
+        # sort the handlers of returned alerts to ensure comparison order is correct
+        for alert in actual_alerts_list:
+            alert["handlers"] = sorted(alert["handlers"])
+
+        # compare actual response with expected response
+        assert actual_alerts_list == expected_alerts_list, "Incorrect response for GET alerts request"
+
         print("Alert spec validated successfully")
 
         print("Wait 10 seconds for Kapacitor stream/batch tasks to start working...")
@@ -198,7 +221,7 @@ class TestAlerts(object):
 
             assert valid, "Alert log content is invalid - {0}".format(alert_log_path)
 
-        # delete the alerts with a DELETE request
+        # delete the alerts with a DELETE request through the TOSCA specification
         with open(alerts_spec, 'rb') as alerts:
             files = {'alert-spec': alerts}
             response = delete("http://{0}/clmc-service/alerts".format(clmc_service_host), files=files)
@@ -236,6 +259,7 @@ class TestAlerts(object):
             if host["name"] == "clmc-service":
                 clmc_service_host = host["ip_address"]
                 break
+        assert clmc_service_host is not None
 
         # create the alerts with a POST request
         print("Sending alerts specification to clmc service...")
@@ -246,11 +270,10 @@ class TestAlerts(object):
             with open(resources_spec, 'rb') as resources:
                 files = {'alert-spec': alerts, 'resource-spec': resources}
                 response = post("http://{0}/clmc-service/alerts".format(clmc_service_host), files=files)
-        assert response.status_code == 200
+        assert response.status_code == 200, response.text
         clmc_service_response = response.json()
         assert "triggers_specification_errors" not in clmc_service_response, "Unexpected error was returned for triggers specification"
         assert "triggers_action_errors" not in clmc_service_response, "Unexpected error was returned for handlers specification"
-        sfc, sfc_instance = "MS_Template_1", "MS_Template_1_1"
         assert (sfc, sfc_instance) == (clmc_service_response["service_function_chain_id"], clmc_service_response["service_function_chain_instance_id"])
         print("Alert spec sent successfully")
 
@@ -270,11 +293,10 @@ class TestAlerts(object):
             with open(resources_spec, 'rb') as resources:
                 files = {'alert-spec': alerts, 'resource-spec': resources}
                 response = put("http://{0}/clmc-service/alerts".format(clmc_service_host), files=files)
-        assert response.status_code == 200
+        assert response.status_code == 200, response.text
         clmc_service_response = response.json()
         assert "triggers_specification_errors" not in clmc_service_response, "Unexpected error was returned for triggers specification"
         assert "triggers_action_errors" not in clmc_service_response, "Unexpected error was returned for handlers specification"
-        sfc, sfc_instance = "MS_Template_1", "MS_Template_1_1"
         assert (sfc, sfc_instance) == (clmc_service_response["service_function_chain_id"], clmc_service_response["service_function_chain_instance_id"])
         print("Alert spec updated successfully")
 
@@ -287,10 +309,80 @@ class TestAlerts(object):
         print("Latest timestamp during the POST request", max_post_timestamp, "Earliest timestamp during the PUT request", min_put_timestamp)
         assert min_put_timestamp - max_post_timestamp >= delay, "There is an alert that wasn't updated properly with a PUT request"
 
-        # delete the alerts with a DELETE request
+        # delete the alerts with a DELETE request for the SFC instance
+        response = delete("http://{0}/clmc-service/alerts/{1}/{2}".format(clmc_service_host, sfc, sfc_instance))
+        assert response.status_code == 200, "Incorrect status code returned after deleting the alert specification"
+        assert response.json() == {"deleted_alerts_count": 4}
+
+        # create the alerts again so that they can be deleted with another endpoint
         with open(alerts_spec, 'rb') as alerts:
-            files = {'alert-spec': alerts}
-            delete("http://{0}/clmc-service/alerts".format(clmc_service_host), files=files)
+            with open(resources_spec, 'rb') as resources:
+                files = {'alert-spec': alerts, 'resource-spec': resources}
+                response = post("http://{0}/clmc-service/alerts".format(clmc_service_host), files=files)
+        assert response.status_code == 200, response.text
+
+        # delete the alerts with a DELETE request for all SFC instances
+        response = delete("http://{0}/clmc-service/alerts/{1}".format(clmc_service_host, sfc))
+        assert response.status_code == 200, "Incorrect status code returned after deleting the alert specification"
+        assert response.json() == {sfc_instance: {"deleted_alerts_count": 4}}
+
+
+def test_data_deletion(rspec_config):
+    """
+    Tests the deletion of data for a given SFC.
+    """
+
+    clmc_service_host = None
+    for host in rspec_config:
+        if host["name"] == "clmc-service":
+            clmc_service_host = host["ip_address"]
+            break
+    assert clmc_service_host is not None
+
+    influx_client = InfluxDBClient(host=clmc_service_host)
+
+    # create the alerts with a POST request
+    print("Sending alerts specification to clmc service...")
+    alerts_spec = join(dirname(__file__), "alerts_test_config.yaml")
+    resources_spec = join(dirname(__file__), "resources_test_config.yaml")
+
+    with open(alerts_spec, 'rb') as alerts:
+        with open(resources_spec, 'rb') as resources:
+            files = {'alert-spec': alerts, 'resource-spec': resources}
+            response = post("http://{0}/clmc-service/alerts".format(clmc_service_host), files=files)
+    assert response.status_code == 200, response.text
+    clmc_service_response = response.json()
+    assert "triggers_specification_errors" not in clmc_service_response, "Unexpected error was returned for triggers specification"
+    assert "triggers_action_errors" not in clmc_service_response, "Unexpected error was returned for handlers specification"
+    assert (sfc, sfc_instance) == (clmc_service_response["service_function_chain_id"], clmc_service_response["service_function_chain_instance_id"])
+    print("Alert spec sent successfully")
+
+    print("Sending delete request for CLMC data of this SFC")
+    response = delete("http://{0}/clmc-service/data/{1}".format(clmc_service_host, sfc))
+    assert response.status_code == 200, "Incorrect status code returned from data delete request"
+    assert response.json() == {"msg": "Time-series data and alerts have been successfully deleted for SFC {0}.".format(sfc)}, "Incorrect message returned from data delete request"
+
+    print("Checking that alerts have been deleted")
+    response = get("http://{0}/clmc-service/alerts/{1}".format(clmc_service_host, sfc))
+    assert response.status_code == 200
+    assert response.json() == {}, "Alerts have not been deleted"
+
+    print("Creating test database for deletion")
+    test_db_name = "test-database"
+    influx_client.create_database(test_db_name)
+    print("Checking that test database has been created")
+    assert test_db_name in set((database["name"] for database in influx_client.get_list_database())), "Test database could not be created"
+
+    print("Deleting test database through data delete request")
+    response = delete("http://{0}/clmc-service/data/{1}".format(clmc_service_host, test_db_name))
+    assert response.status_code == 200, "Incorrect status code returned from data delete request"
+    assert response.json() == {"msg": "Time-series data and alerts have been successfully deleted for SFC {0}.".format(test_db_name)}, "Incorrect message returned from data delete request"
+
+    print("Checking that test database has been deleted")
+    # get the list of databases and check that the SFC id is not in it
+    assert test_db_name not in set((database["name"] for database in influx_client.get_list_database())), "Test database has not been deleted"
+
+    print("SFC data has been successfully deleted")
 
 
 def tasks_timestamps(all_tasks, sfc_id, sfc_instance_id):