diff --git a/.gitignore b/.gitignore
index 0694a2d4ec36f44d8d015249cf38915cb469b6fa..1cfef0b4e51e134bfd0ce20a2e4f0b80fae52586 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,7 +5,7 @@
 *egg-info*
 *git-commit-ref*
 *_version.py*
-ubuntu-xenial-16.04-cloudimg-console.log
+ubuntu-bionic-18.04-cloudimg-console.log
 .idea/
 *.egg
 *.pyc
diff --git a/docs/AlertsSpecification.md b/docs/AlertsSpecification.md
new file mode 100644
index 0000000000000000000000000000000000000000..e4e1cc78d2501ac79baec43bb11594b2eba0a826
--- /dev/null
+++ b/docs/AlertsSpecification.md
@@ -0,0 +1,363 @@
+<!--
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road, 
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          15-08-2018
+//      Created for Project :   FLAME
+-->
+
+# **FLAME - TOSCA Alerts Specification**
+
+#### **Authors**
+
+|Authors|Organisation|                    
+|:---:|:---:|  
+|[Nikolay Stanchev](mailto:ns17@it-innovation.soton.ac.uk)|[University of Southampton, IT Innovation Centre](http://www.it-innovation.soton.ac.uk)|
+|[Michael Boniface](mailto:mjb@it-innovation.soton.ac.uk)|[University of Southampton, IT Innovation Centre](http://www.it-innovation.soton.ac.uk)|
+
+#### Description
+
+This document outlines the TOSCA alerts specification used to configure alerts within CLMC. Alerts are configured through a YAML-based,
+TOSCA-compliant document that follows the TOSCA simple profile. This document is passed to the CLMC service, which parses and validates it.
+The CLMC service then creates and activates the alerts within Kapacitor and registers the HTTP alert handlers specified in the document.
+The specification is compliant with the TOSCA policy template as implemented by the OpenStack TOSCA parser. See the example below:
+
+https://github.com/openstack/tosca-parser/blob/master/toscaparser/tests/data/policies/tosca_policy_template.yaml
+
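+As a quick local check, an alerts specification document can be parsed with the same parser before it is submitted to the
+CLMC service. Below is a minimal sketch, assuming the `tosca-parser` package is installed and the imported
+`flame_clmc_alerts_definitions.yaml` file is resolvable from the document's location (note that this covers TOSCA parsing
+only - the CLMC-specific schema validation performed by the service is not included):
+
+```python
+# Parse a TOSCA alerts specification document with the OpenStack TOSCA parser.
+from toscaparser.tosca_template import ToscaTemplate
+
+# Hypothetical file name - use the path to your own alerts specification document.
+spec = ToscaTemplate("alert-specification.yaml")
+
+# The policies (and their triggers) defined in the topology template can then be inspected.
+for policy in spec.policies:
+    print(policy.name, policy.type)
+```
+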
+#### TOSCA Alerts Specification Document
+
+The TOSCA Alerts Specification Document consists of two main sections - **metadata** and **policies**. Each **policy** contains a number
+of triggers. A **trigger** is a fully qualified specification for an alert. Full definitions and clarification of the structure of the document
+are given in the following sections. An example of a valid alerts specification
+document looks like this:
+
+```yaml
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+description: TOSCA Alerts Configuration document
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+metadata:
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+topology_template:
+
+  policies:
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          high_latency:
+            description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
+            event_type: threshold
+            metric: network.latency
+            condition:
+              threshold: 45
+              granularity: 120
+              aggregation_method: mean
+              resource_type:
+                flame_location: watershed
+              comparison_operator: gt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/high-latency
+                
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          low_requests:
+            description: |
+              This event triggers when the last reported number of requests for a given service function
+              falls behind a given threshold.
+            event_type: threshold
+            metric: storage.requests
+            condition:
+              threshold: 5
+              granularity: 60
+              aggregation_method: last
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: lt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/low-requests
+                
+    - requests_diff_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          increase_in_requests:
+            description: |
+              This event triggers when the number of requests has increased relative to the number of requests received
+              120 seconds ago.
+            event_type: relative
+            metric: storage.requests
+            condition:
+              threshold: 100  # requests have increased by at least 100
+              granularity: 120
+              resource_type:
+                flame_sfc: companyA-VR
+                flame_sfci: companyA-VR-premium
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_server: watershed
+                flame_location: watershed
+              comparison_operator: gte
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+          decrease_in_requests:
+            description: |
+              This event triggers when the number of requests has decreased relative to the number of requests received
+              120 seconds ago.
+            event_type: relative
+            metric: storage.requests
+            condition:
+              threshold: -100  # requests have decreased by at least 100
+              granularity: 120
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: lte
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                
+    - missing_measurement_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          missing_storage_measurements:
+            description: This event triggers when the number of storage measurements reported falls below the threshold value.
+            event_type: deadman
+            # deadman trigger instances monitor the whole measurement (storage in this case), so simply put a star for field value
+            # to be compliant with the <measurement>.<field> format
+            metric: storage.*
+            condition:
+              threshold: 0  # if requests are less than or equal to 0 (in other words, no measurements are reported)
+              granularity: 60  # check for missing data over the last 60 seconds
+              resource_type:
+                flame_sfp: storage
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+```
+
+
+##### Metadata
+
+The ***metadata*** section specifies the service function chain ID and the service function chain instance ID to which this
+alerts specification relates. The format is the following:
+
+```yaml
+metadata:
+    sfc: <sfc_id>
+    sfci: <sfc_i_id>
+```
+
+##### Policies
+
+The ***policies*** section defines a list of policy nodes, each representing a fully qualified configuration for an
+alert within CLMC. The format is the following:
+
+```yaml
+topology_template:
+
+    policies:
+        - <policy_identifier>:
+            type: eu.ict-flame.policies.StateChange
+            triggers:
+                <event identifier>:
+                  description: <optional description for the given event trigger>
+                  event_type: <threshold | relative | deadman>
+                  metric: <measurement>.<field>
+                  condition:
+                    threshold: <critical value - semantics depend on the event type>
+                    granularity: <period in seconds - semantic depends on the event type>
+                    aggregation_method: <aggregation function supported by InfluxDB - e.g. 'mean'>
+                    resource_type:
+                      <CLMC Information Model Tag Name>: <CLMC Information Model Tag Value>
+                      <CLMC Information Model Tag Name>: <CLMC Information Model Tag Value>
+                      ...
+                    comparison_operator: <logical operator to use for comparison, e.g. 'gt', 'lt', 'gte', etc.>
+                  action:
+                    implementation:
+                      - <HTTP Alert Handler URL - receives POST messages from Kapacitor when alerts trigger>
+                      - <HTTP Alert Handler URL - receives POST messages from Kapacitor when alerts trigger>
+                      ...
+        ...
+```
+
+
+##### Definitions
+
+* **policy_identifier** - policy label which MUST match with a StateChange policy in the TOSCA resource specification document
+submitted to the FLAME Orchestrator.
+
+* **event_identifier** - the name of the event that **MUST** match with the *constraint* event name referenced in the TOSCA resource
+specification document submitted to the FLAME Orchestrator.
+
+* **event_type** - the type of TICK script template used to create the alert. The supported types are **threshold**
+(expected to be the most common one), **relative** and **deadman** - see the **Event types** section below for details
+on each of them.
+
+* **metric** - the metric to query in InfluxDB; it must include the measurement name and the field name in the
+format `<measurement>`.`<field>`. The only exception is the **deadman** event type - the `<field>` is not used there, but
+the format stays the same for consistency, so `<measurement>.*` is sufficient.
+
+* **threshold** -
+    * for **threshold** event type, this is the critical value the queried metric is compared to.
+    * for **relative** event type, this is the critical value the difference (between the current metric value and the past metric value) is compared to.
+    * for **deadman** event type, this is the critical value the number of measurement points (received in InfluxDB) is compared to.
+
+* **granularity** - period in seconds
+    * for the **threshold** event type, this value specifies how often Kapacitor should query InfluxDB to check whether the alert condition is true.
+    * for the **relative** event type, this value specifies how far back in time the past metric value (against which the current value is compared) was reported.
+    * for the **deadman** event type, this value specifies the length of the time span in which the number of reported measurement points is checked.
+
+* **aggregation_method** - the function to use when querying InfluxDB, e.g. median, mean, etc. This value is only used when
+the event_type is set to **threshold**.
+
+* **resource_type** - provides context for the given event - key-value pairs for the global tags of the CLMC Information Model.
+This includes any of the following: `"flame_sfc", "flame_sfci", "flame_sfp", "flame_sf", "flame_sfe", "flame_server", "flame_location"`. 
+Keep in mind that filtering on **flame_sfc** and **flame_sfci** is generated automatically from the values in the **metadata** section of the alerts specification.
+For more information on the global tags, please check the [documentation](monitoring.md).  
+
+* **comparison_operator** - the logical operator to use for comparison - lt (less than), gt (greater than), lte (less than or equal to), etc.
+
+* **implementation** - a list of URLs of alert handlers to which alert data is sent when the event condition is true (a minimal example handler is sketched below).
+
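+Alert handlers are plain HTTP endpoints - when a trigger fires, Kapacitor sends an HTTP POST request with a JSON body
+describing the alert to every URL listed under **implementation**. Below is a minimal sketch of such a handler, using only
+the Python standard library; the exact structure of the JSON payload is determined by Kapacitor and may contain more
+fields than the ones referenced in the comments, and the port used here (8888) is an arbitrary choice:
+
+```python
+# Minimal sketch of an HTTP alert handler for Kapacitor POST messages.
+import json
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+class AlertHandler(BaseHTTPRequestHandler):
+    def do_POST(self):
+        length = int(self.headers.get("Content-Length", 0))
+        alert = json.loads(self.rfile.read(length))
+        # Kapacitor alert messages typically include fields such as "id", "level" and "message".
+        print("Received alert:", alert.get("id"), alert.get("level"), alert.get("message"))
+        self.send_response(200)
+        self.end_headers()
+
+if __name__ == "__main__":
+    # Register http://<host>:8888/ as one of the 'implementation' URLs to receive alerts here.
+    HTTPServer(("0.0.0.0", 8888), AlertHandler).serve_forever()
+```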
+
+##### Event types
+
+* **threshold** - A threshold event type is an alert in which Kapacitor queries InfluxDB for a specific metric over a given period of time,
+aggregating it with a query function such as *mean*, *median*, *mode*, etc. The aggregated value is then compared against a given threshold. If the
+result of the comparison operation is true, an alert is triggered. For example:
+
+    ```yaml
+    high_latency:
+        description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
+        event_type: threshold
+        metric: network.latency
+        condition:
+          threshold: 45
+          granularity: 120
+          aggregation_method: mean
+          resource_type:
+            flame_location: watershed
+          comparison_operator: gt
+        action:
+          implementation:
+            - http://sfemc.flame.eu/notify
+            - http://companyA.alert-handler.flame.eu/high-latency
+    ``` 
+    
+    This trigger specification will create an alert task in Kapacitor, which queries the **latency** field in the **network**
+    measurement on location **watershed** every **120** seconds and compares the mean value for the last 120 seconds with the threshold value **45**.
+    If the mean latency exceeds 45 (**gt** operator is used, which stands for **greater than**), an alert is triggered. This alert will
+    be sent through an HTTP POST message to the URLs listed in the **implementation** section.
+    
+    The currently included InfluxQL functions are:
+    
+    `"count", "mean", "median", "mode", "sum", "first", "last", "max", "min"`
+    
+    The comparison operator mappings are as follows:
+    
+    ```
+    "lt" : "less than",
+    "gt" : "greater than", 
+    "lte" : "less than or equal to", 
+    "gte" : "greater than or equal to", 
+    "eq" : "equal", 
+    "neq" : "not equal"
+    ```
+
+* **relative** - A relative event type is an alert in which Kapacitor computes the difference between the current value of a metric and the value
+reported a given period of time ago. The difference between the current and the past value is then compared against a given
+threshold. If the result of the comparison operation is true, an alert is triggered. For example:
+
+    ```yaml
+    decrease_in_requests:
+        description: |
+          This event triggers when the number of requests has decreased relative to the number of requests received
+          120 seconds ago.
+        event_type: relative
+        metric: storage.requests
+        condition:
+          threshold: -100
+          granularity: 120
+          resource_type:
+            flame_sfp: storage
+            flame_sf: storage-users
+            flame_location: watershed
+          comparison_operator: lte
+        action:
+          implementation:
+            - http://sfemc.flame.eu/notify
+    ```
+    
+    This trigger specification will create an alert task in Kapacitor, which compares every **requests** value reported in 
+    measurement **storage** with the value received **120** seconds ago. If the difference between the current and the past
+    value is less than or equal to (comparison operator is **lte**) **-100**, an alert is triggered. Put simply, an alert
+    is triggered if the current **requests** value has decreased by at least 100 relative to the value reported 120 seconds ago.
+    The queried value is contextualised for service function **storage-users** (using service function package **storage**) 
+    at location **watershed**. Triggered alerts will be sent through an HTTP POST message to the URLs listed in the **implementation** section.
+    
+    *Notes*:
+    
+    * **aggregation_method** is not required here - the alert task compares the actual value that's being reported (stream mode)
+    * if **aggregation_method** is provided, it will be ignored
+
+* **deadman** - A deadman event type is an alert in which Kapacitor counts the number of points reported in a measurement
+over a given period of time. This number is then compared to a given threshold value. If the number of reported points
+falls to or below the threshold value, an alert is triggered.
+For example:
+
+    ```yaml
+    missing_storage_measurements:
+        description: This event triggers when the number of storage measurements reported falls below the threshold value.
+        event_type: deadman
+        metric: storage.*
+        condition:
+          threshold: 0
+          granularity: 60
+          resource_type:
+            flame_sfp: storage
+        action:
+          implementation:
+            - http://sfemc.flame.eu/notify
+    ```
+
+    This trigger specification will create an alert task in Kapacitor, which monitors the number of points reported in
+    measurement **storage** with tag **flame_sfp** set to **storage**. This count is computed every 60 seconds.
+    If no points have been reported during the last 60 seconds (i.e. the number of reported points is less than or equal to the threshold of **0**), an alert
+    will be triggered. Triggered alerts will be sent through an HTTP POST message to the URLs listed in the **implementation** section.
+    
+    *Notes*:
+    
+    * **metric** only requires the measurement name for this event type - a field name is not needed
+    * the trigger specification still needs to be consistent with the parsing rule for **metric**: `<measurement>`.`<field>`
+    * simply putting a `*` for the field is sufficient, e.g. `storage.*`
+    * even if something else is given for the field value, it will be ignored - only the **measurement** name is used
+    * **aggregation_method** is not required for this event type; any value provided will be ignored
+    * **comparison_operator** is not required for this event type; any value provided will be ignored
diff --git a/docs/clmc-information-model.md b/docs/clmc-information-model.md
index c338f8eaf5336100c0378dca96abc575bbe8f112..a579b8c7460dbd332e544f8caf8743e9eec2b909 100644
--- a/docs/clmc-information-model.md
+++ b/docs/clmc-information-model.md
@@ -254,13 +254,13 @@ The measurement model considers three monitoring views on an endpoint with field
 
 All of the measurements on a endpoint share a common context that includes tag values:
 
-* sfc – an orchestration template
-* sfc_i – an instance of the orchestration template
-* sf – a SF type
-* sf_i – an instance of the SF type
-* ipendpoint – an authoritive copy of the SF instance either VM or container
-* server – a physical or virtual server for hosting VM or container instances
-* location – the location of the server
+* flame_sfc – an orchestration template (a.k.a. Service Function Chain)
+* flame_sfci – an instance of the orchestration template (a.k.a. Service Function Chain Instance)
+* flame_sfp – the package a service function is using (a.k.a. Service Function Package)
+* flame_sf – the service function defined in the TOSCA resource specification (a.k.a. Service Function)
+* flame_sfe – an authoritative copy of the SF - either VM or container (a.k.a. Service Function Endpoint)
+* flame_server – a cluster VM inside which service function endpoints are placed (at the current stage, the value of this tag is the same as the *flame_location* tag)
+* flame_location – the location of the server, i.e. the physical machine that hosts the cluster VM
 
 By including this context with service, network and host measurements it is possible to support range of temporal queries associated with SFC’s. By adopting the same convention for identifiers it is possible to combine measurements across service, network and host to create new series that allows exploration of different aspects of the VM instance, including cross-layer queries.
 
@@ -368,13 +368,13 @@ ipendpoint measurements measure the configuration, usage and performance of VM/C
 
 Common tags
 
-* location – a physical or virtual server for hosting nodes instances
-* server – the location of the server
-* sfc – an orchestration template
-* sfc_i – an instance of the orchestration template
-* sf – a SF package identifier indicating the type and version of SF
-* sf_i – an instance of the SF type
-* ipendpoint – an authoritive copy of the SF instance either a container or VM
+* flame_sfc – an orchestration template (a.k.a. Service Function Chain)
+* flame_sfci – an instance of the orchestration template (a.k.a. Service Function Chain Instance)
+* flame_sfp – the package a service function is using (a.k.a. Service Function Package)
+* flame_sf – the service function defined in the TOSCA resource specification (a.k.a. Service Function)
+* flame_sfe – an authoritative copy of the SF - either VM or container (a.k.a. Service Function Endpoint)
+* flame_server – a cluster VM inside which service function endpoints are placed (at the current stage, the value of this tag is the same as the *flame_location* tag)
+* flame_location – the location of the server, i.e. the physical machine that hosts the cluster VM
 
 **endpoint_config**
 
diff --git a/docs/clmc-service.md b/docs/clmc-service.md
index ed81c97dce20a74be753db24c2967cc5435dc241..e1b21a49f155c68929a5f95766c413fa0bea8d0f 100644
--- a/docs/clmc-service.md
+++ b/docs/clmc-service.md
@@ -25,16 +25,137 @@
 
 #### **Authors**
 
-|Authors|Organisation|
-|---|---|
+|Authors|Organisation|                    
+|:---:|:---:|  
 |[Nikolay Stanchev](mailto:ns17@it-innovation.soton.ac.uk)|[University of Southampton, IT Innovation Centre](http://www.it-innovation.soton.ac.uk)|
 
 #### Description
 
-This document describes the CLMC service and its API endpoints. The CLMC service is implemented in the *Python* framework called **Pyramid**.
-It offers different API endpoints to configure and control the aggregator as well as a CRUD API for service function endpoints configuration data and Graph API for calculating round trip time. All source code, tests and configuration files of the service can be found in the **src/service** folder.
+This document describes the CLMC service and its API endpoints. The CLMC service is implemented using the *Python* web framework **Pyramid**.
+It offers several API endpoints: the Graph API for calculating round-trip time, the CRUD API for service function endpoint 
+configuration data and the Alerts API for creating and subscribing to alerts in Kapacitor. All source code, tests and 
+configuration files of the service can be found in the **src/service** folder.
 
 
+## Alerts API Endpoints
+
+* **GET** ***/alerts?sfc={service function chain id}&sfci={service function chain instance id}&policy={policy id}&trigger={trigger id}***
+
+    This API method can be used to retrieve the alert task and alert topic identifiers generated during the processing of an alerts specification document.
+    These identifiers can then be used to interact with the Kapacitor HTTP API for further configuration or modification of alerts - https://docs.influxdata.com/kapacitor/v1.4/working/api/.
+
+    * Request:
+    
+        Expects a URL query string with the request parameters - **sfc**, **sfci**, **policy** and **trigger**. The given parameters must match the values used in the alerts specification
+        document. Otherwise, incorrect identifiers will be returned.
+        
+    * Request URL Examples:
+    
+        **/alerts?sfc=MSDemo&sfci=MSDemo-premium&policy=requests_diff&trigger=low_requests**
+        
+        **/alerts?sfc=SimpleMediaService&sfci=SimpleMediaService-1&policy=rtt_deviation&trigger=increase_in_rtt**
+     
+    * Response
+    
+        The response of this request is a JSON-formatted content, which contains the task and topic identifiers, along with the Kapacitor
+        API endpoints to use for configuring the given task, topic and the respective handlers.
+        
+        Returns a 400 Bad Request if the URL query string parameters are invalid or otherwise incorrect.
+        
+    * Response Body Example:
+    
+        ```json
+        {
+            "task_identifier": "094f23d6e948c78e9fa215528973fb3aeefa5525898626c9ea049dc8e87a7388",
+            "topic_identifier": "094f23d6e948c78e9fa215528973fb3aeefa5525898626c9ea049dc8e87a7388",
+            "task_api_endpoint": "/kapacitor/v1/tasks/094f23d6e948c78e9fa215528973fb3aeefa5525898626c9ea049dc8e87a7388",
+            "topic_api_endpoint": "/kapacitor/v1/alerts/topics/094f23d6e948c78e9fa215528973fb3aeefa5525898626c9ea049dc8e87a7388",
+            "topic_handlers_api_endpoint": "/kapacitor/v1/alerts/topics/094f23d6e948c78e9fa215528973fb3aeefa5525898626c9ea049dc8e87a7388/handlers"
+        }
+        ```
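+
+    * Example of using the returned identifiers (Python):
+    
+        A minimal sketch of how the returned identifiers could be used to query the Kapacitor HTTP API afterwards. It assumes
+        the **requests** package is installed and that the CLMC service and Kapacitor are reachable on *localhost* at ports
+        9080 and 9092 respectively - adjust the hosts and ports to your deployment:
+        
+        ```python
+        import requests
+        
+        # Assumed hosts/ports - adjust to your deployment.
+        CLMC = "http://localhost:9080"
+        KAPACITOR = "http://localhost:9092"
+        
+        params = {"sfc": "MSDemo", "sfci": "MSDemo-premium", "policy": "requests_diff", "trigger": "low_requests"}
+        identifiers = requests.get(CLMC + "/alerts", params=params).json()
+        
+        # Use the returned Kapacitor API endpoints to inspect the generated alert task and its handlers.
+        task = requests.get(KAPACITOR + identifiers["task_api_endpoint"]).json()
+        handlers = requests.get(KAPACITOR + identifiers["topic_handlers_api_endpoint"]).json()
+        print(task.get("status"), handlers)
+        ```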
+
+* **POST** ***/alerts***
+
+    This API method can be used to send an alert specification document, which is then used by the CLMC service to create
+    alert tasks and subscribe alert handlers to those tasks in Kapacitor. For further information on the alert specification
+    document, please check the [CLMC Alert Specification Documentation](AlertsSpecification.md).
+
+    * Request:
+        
+        Expects two YAML-formatted files in the request - one referenced with ID ***alert-spec*** representing the TOSCA alert specification 
+        document and one referenced with ID ***resource-spec*** representing the TOSCA resource specification document. 
+        The alert specification document is then parsed with the OpenStack TOSCA parser (https://github.com/openstack/tosca-parser/tree/master/toscaparser)
+        and validated against the CLMC alerts specification schema (see the [documentation](AlertsSpecification.md) for more details). The TOSCA resource
+        specification document is used only for consistency verification between the two documents - ensuring that they refer
+        to the same service function chain and service function chain instance, as well as making sure that there is at least one
+        trigger alert in the alerts specification that relates to a state change policy in the resources specification.
+        
+    * Example for sending a request with curl:
+    
+        `curl -F "alert-spec=@alert-specification.yaml" -F "resource-spec=@resource-specification.yaml" http://localhost:9080/alerts`
+        
+        where **alert-specification.yaml** is the path to the alerts specification file and **resource-specification.yaml** is the
+        path to the resource specification file.
+    
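+    * Example for sending the same request with Python:
+    
+        A minimal sketch, assuming the **requests** package is installed and the CLMC service is reachable at *localhost:9080*
+        (adjust the host and port to your deployment):
+        
+        ```python
+        import requests
+        
+        # The multipart form field IDs must be "alert-spec" and "resource-spec", as described above.
+        with open("alert-specification.yaml", "rb") as alert_spec, open("resource-specification.yaml", "rb") as resource_spec:
+            files = {"alert-spec": alert_spec, "resource-spec": resource_spec}
+            response = requests.post("http://localhost:9080/alerts", files=files)
+        
+        result = response.json()
+        # Any errors reported by Kapacitor are returned in these two (optional) lists - see the response examples below.
+        print(result.get("triggers_specification_errors", []))
+        print(result.get("triggers_action_errors", []))
+        ```
+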
+    * Response:
+        
+        The response of this request is a JSON-formatted content, which contains the SFC and SFC instance identifiers
+        from the alert specification along with any errors encountered when interacting with Kapacitor.
+        
+        Returns a 400 Bad Request if the request does not contain a yaml file referenced with ID **resource-spec**.
+        
+        Returns a 400 Bad Request if the resource specification file is not a TOSCA-compliant valid YAML file.
+        
+        Returns a 400 Bad Request if the request does not contain a yaml file referenced with ID **alert-spec**.
+        
+        Returns a 400 Bad Request if the alert specification file is not a valid YAML file.
+        
+        Returns a 400 Bad Request if the alert specification file cannot be parsed with the TOSCA parser.
+        
+        Returns a 400 Bad Request if the alert specification file fails validation against the CLMC alerts specification schema.
+        
+        Returns a 400 Bad Request if there are inconsistencies between the alert specification and resource specification files -
+        e.g. they refer to different service function chain or service function chain instance identifiers, or there is no
+        alert in the alerts specification related to a given state change policy in the resources specification.
+        
+    * Response Body Example:
+    
+        ```json
+        {
+          "msg": "Alerts specification has been successfully validated and forwarded to Kapacitor", 
+          "service_function_chain_id": "<sfc_id>",
+          "service_function_chain_instance_id": "<sfc_instance_id>"
+        }
+        ```
+        
+        If the CLMC service encounters any errors when creating alerts and handlers in Kapacitor, they will
+        be reported in the response as two lists of error objects. The **triggers_specification_errors** list contains
+        any errors encountered while trying to create the alert tasks; the **triggers_action_errors** list contains 
+        any errors encountered while trying to subscribe the HTTP handlers to the created tasks.
+     
+        ```json
+        {
+          "msg": "Alerts specification has been successfully validated and forwarded to Kapacitor", 
+          "service_function_chain_id": "<sfc_id>",
+          "service_function_chain_instance_id": "<sfc_instance_id>",
+          "triggers_action_errors": [
+                  {
+                      "trigger": "<trigger ID the error is related to>",
+                      "handler": "<handler URL the error is related to>",
+                      "policy": "<policy ID the error is related to>",
+                      "error": "<error message returned from Kapacitor>"
+                  }
+          ],
+          "triggers_specification_errors": [
+                {
+                      "trigger": "<trigger ID the error is related to>",
+                      "policy": "<policy ID the error is related to>",
+                      "error": "<error message returned from Kapacitor>"
+                }
+          ]
+        }
+        ```
+
 ## Graph API Endpoints
 
 * **Assumptions**
@@ -42,15 +163,14 @@ It offers different API endpoints to configure and control the aggregator as wel
     * For each service function, there is a field/fields from which an average estimation of the size of a **request** to this service function can be derived.
     * For each service function, there is a field/fields from which an average estimation of the size of a **response** from this service function can be derived.
     * All the aforementioned fields reside in a single measurement.
-    * There is at most 1 service function hosted on a particular endpoint.
 
 * **POST** ***/graph/temporal?from={timestamp-seconds}&to={timestamp-seconds}***
 
-    This API method sends a request to the CLMC service to build a graph related to the time range declared with the *from* and *to* URL parameters.
+    This API method sends a request to the CLMC service to build a graph related to the time range between the *from* and *to* timestamps (URL query parameters).
 
    * Request:
 
-        Expects a JSON-formatted request body which declares the database, retention policy and service function chain instance for which the graph is built.
+        Expects a JSON-formatted request body which declares the service function chain and service function chain instance for which the graph is built.
         The request should also include the service functions that must be included in the graph along with the measurement name, response time field, request size field and
         response size field for each service function. The declared fields could be influx functions across multiple fields.
 
@@ -58,8 +178,7 @@ It offers different API endpoints to configure and control the aggregator as wel
 
         ```json
         {
-          "database": "MSDemo",
-          "retention_policy": "autogen",
+          "service_function_chain": "MSDemo",
           "service_function_chain_instance": "MSDemo_1",
           "service_functions": {
             "nginx": {
@@ -81,34 +200,34 @@ It offers different API endpoints to configure and control the aggregator as wel
         These parameters are then filled in the following influx query template:
 
         ```
-        SELECT {0} AS mean_response_time, {1} AS mean_request_size, {2} AS mean_response_size FROM "{3}"."{4}".{5} WHERE sfc_i='{6}' and time>={7} and time<{8} GROUP BY ipendpoint, location, sf_i
+        SELECT {0} AS mean_response_time, {1} AS mean_request_size, {2} AS mean_response_size FROM "{3}"."{4}".{5} WHERE "flame_sfc"=\'{6}\' and "flame_sfci"=\'{7}\' and time>={8} and time<{9} GROUP BY "flame_sfe", "flame_location", "flame_sf"
         ```
 
-        E.g. for the minio service function, the following query will be used to retrieve the data from influx (request url is /graph/build?from=1528385420&to=1528385860):
+        E.g. for the minio service function, the following query will be used to retrieve the data from InfluxDB (request url is */graph/temporal?from=1528385420&to=1528385860*):
 
         ```
-        SELECT mean(sum)/mean(count) AS mean_response_time, mean(request_size)/mean(count) AS mean_request_size, mean(response_size)/mean(count) AS mean_response_size FROM "MSDemo"."autogen".minio_http_requests_duration_seconds WHERE sfc_i='MSDemo_1' and time>=1528385420000000000 and time<1528385860000000000 GROUP BY ipendpoint, location, sf_i
+        SELECT mean(sum)/mean(count) AS mean_response_time, mean(request_size)/mean(count) AS mean_request_size, mean(response_size)/mean(count) AS mean_response_size FROM "MSDemo"."autogen".minio_http_requests_duration_seconds WHERE "flame_sfc"='MSDemo' and "flame_sfci"='MSDemo_1' and time>=1528385420000000000 and time<1528385860000000000 GROUP BY "flame_sfe", "flame_location", "flame_sf"
         ```
-
+        
+        N.B. the database name is assumed to be the SFC identifier.
         N.B. timestamps are converted to nano seconds.
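+
+   * Example for sending a request with Python:
+
+        A minimal sketch, assuming the **requests** package is installed and the CLMC service is reachable at *localhost:9080*
+        (adjust the host and port to your deployment; *graph-request.json* is a hypothetical file holding a request body such as the example above):
+
+        ```python
+        import json
+        import requests
+
+        # Load the JSON request body (see the Request Body Example above for the expected structure).
+        with open("graph-request.json") as fh:
+            body = json.load(fh)
+
+        # The from/to URL query parameters are UNIX timestamps in seconds.
+        response = requests.post("http://localhost:9080/graph/temporal",
+                                 params={"from": 1528385420, "to": 1528385860}, json=body)
+
+        # The response contains the request parameters along with a request UUID for managing the temporal subgraph.
+        print(response.json())
+        ```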
 
    * Response:
 
-        The response of this request is a JSON content, which contains all request parameters used to build the graph, along with a request UUID. This request ID can then be used to manage the temporal subgraph that was created
-        in response to this request.
+        The response of this request is a JSON-formatted content, which contains all request parameters used to build the graph, along with a request UUID. 
+        This request ID can then be used to manage the temporal subgraph that was created in response to this request.
 
         Returns a 400 Bad Request error if the request body is invalid.
 
         Returns a 400 Bad Request error if the request URL parameters are invalid or missing.
 
-        Returns a 400 Bad Request error if the service function chain instance ID is not in the format `<sfcID>_<numberID>`
-
    * Response Body Example:
 
         ```json
         {
           "database": "MSDemo",
           "retention_policy": "autogen",
+          "service_function_chain": "MSDemo",
           "service_function_chain_instance": "MSDemo_1",
           "service_functions": {
             "nginx": {
@@ -188,14 +307,13 @@ It offers different API endpoints to configure and control the aggregator as wel
             "response_time": 15.75,
             "round_trip_time": 81.75,
             "global_tags": {
-                "sr": "SR1",
-                "ipendpoint": "minio_1_ep1",
-                "sfc": "MSDemo",
-                "sf_i": "minio_1",
-                "location": "DC1",
-                "sf": "minio",
-                "sfc_i": "MSDemo_1",
-                "host": "host2"
+                "flame_sfe": "minio_1_ep1",
+                "flame_sfc": "MSDemo",
+                "flame_sfci": "MSDemo_1",
+                "flame_sfp": "minio",
+                "flame_sf": "minio_1",
+                "flame_location": "DC1",
+                "flame_server": "DC1"
             }
         }
         ```
@@ -217,167 +335,25 @@ It offers different API endpoints to configure and control the aggregator as wel
             "response_time": 3,
             "round_trip_time": 3,
             "global_tags": {
-                "sr": "SR1",
-                "ipendpoint": "minio_1_ep1",
-                "sfc": "MSDemo",
-                "sf_i": "minio_1",
-                "location": "DC1",
-                "sf": "minio",
-                "sfc_i": "MSDemo_1",
-                "host": "host2"
+                "flame_sfe": "minio_1_ep1",
+                "flame_sfc": "MSDemo",
+                "flame_sfci": "MSDemo_1",
+                "flame_sfp": "minio",
+                "flame_sf": "minio_1",
+                "flame_location": "DC1",
+                "flame_server": "DC1"
             }
         }
         ```
 
-## Aggregator API Endpoints
-
-**Note: this API is deprecated. The graph API should be used to compute RTT instead.**
-
-* **GET** ***/aggregator/config***
-
-    This API method retrieves information about the configuration of the aggregator.
-
-    * Response:
-
-        Returns a JSON-formatted response with the configuration data of the aggregator - *aggregator_report_period*, *aggregator_database_name*,
-        *aggregator_database_url*.
-
-    * Response Body Example:
-
-        ```json
-        {
-          "aggregator_report_period": 5,
-          "aggregator_database_name": "CLMCMetrics",
-          "aggregator_database_url": "http://172.40.231.51:8086"
-        }
-        ```
-
-* **PUT** ***/aggregator/config***
+* Generating network measurements
 
-    This API method updates the configuration of the aggregator.
-
-    * Request:
-
-        Expects a JSON-formatted request body with the new configuration of the aggregator. The body should contain only
-        three key fields - *aggregator_report_period* (positive integer, seconds), *aggregator_database_name* and *aggregator_database_url* (a valid URL).
-
-    * Request Body Example:
-
-        ```json
-        {
-          "aggregator_report_period": 25,
-          "aggregator_database_name": "CLMCMetrics",
-          "aggregator_database_url": "http://172.50.231.61:8086"
-        }
-        ```
-
-    * Response:
-
-        The body of the request is first validated before updating the configuration. If validation is successful, returns
-        a JSON-formatted response with the new configuration data. Otherwise, an **HTTP Bad Request** response is returned.
-
-    * Response Body Example:
-
-        ```json
-        {
-          "aggregator_report_period": 25,
-          "aggregator_database_name": "CLMCMetrics",
-          "aggregator_database_url": "http://172.50.231.61:8086"
-        }
-        ```
-
-    * Notes:
-
-        If the configuration is updated, while the aggregator is running, it is not automatically restarted. An explicit API call
-        must be made with a *restart* request to apply the updated configuration. In the case of such PUT request as the one described
-        above, the response will contain more information indicating that the configuration of the aggregator is in a malformed state.
-
-        * Response Body Example:
-
-            ```json
-            {
-              "aggregator_report_period": 125,
-              "aggregator_database_name": "CLMCMetrics",
-              "aggregator_database_url": "http://172.50.231.61:8086/",
-              "malformed": true,
-              "comment": "Aggregator is running in a malformed state - it uses an old version of the configuration. Please, restart it so that the updated configuration is used."
-            }
-            ```
-
-* **GET** ***/aggregator/control***
-
-    This API method retrieves information about the status of the aggregator - whether it is running or not.
-
-    * Response:
-
-        Returns a JSON-formatted response with the status data of the aggregator - *aggregator_running* field. If the aggregator
-        is running in a malformed state, the response will also indicate this with two additional fields - *malformed* and *comment*.
-
-    * Response Body Example:
-
-        ```json
-        {
-          "aggregator_running": true
-        }
-        ```
-
-    * Response Body Example - for malformed configuration:
-
-        ```json
-        {
-          "aggregator_running": true,
-          "malformed": true,
-          "comment": "Aggregator is running in a malformed state - it uses an old version of the configuration. Please, restart it so that the updated configuration is used."
-        }
-        ```
-
-* **PUT** ***/aggregator/control***
-
-    This API method updates the status of the aggregator - a user can start, stop or restart it.
-
-    * Request:
-
-        Expects a JSON-formatted request body with the new status of the aggregator. The body should contain only one key
-        field - *action* (the action to undertake, which can be **start**, **restart** or **stop**)
-
-    * Request Body Example:
-
-        ```json
-        {
-          "action": "start"
-        }
-        ```
-
-    * Response:
-
-        The body of the request is first validated before taking any actions. If the action is not one of the listed above,
-        then the validation will fail. If validation is successful, returns a JSON-formatted response with the new status of
-        the aggregator. Otherwise, an **HTTP Bad Request** response is returned.
-
-    * Response Body Example:
-
-        ```json
-        {
-          "aggregator_running": true
-        }
-        ```
-
-    * Notes:
-
-        * If a **start** action is requested, while the aggregator is running, then the request will be ignored. To restart the
-        aggregator, a user should use a **restart** action.
-
-        * If a **stop** action is requested, while the aggregator is not running, then the request will be ignored.
-
-        * A request with a **restart** action, while the aggregator is not running, has the same functionality as a request
-        with a **start** action.
-
-        * The functionality of a request with a **restart** action is the same as the functionlity of a **stop** action
-        followed by a **start** action.
+    To generate network measurements, which are then used to create the network topology in the Neo4j graph, refer to
+    the *src/service/clmcservice/generate_network_measurements.py* script. An example configuration file can be found at *src/service/resources/GraphAPI/network_config.json*.
 
 ## CRUD API for service function endpoint configurations
 
-**Note: this API is experimental and is not intended to be used**
+**Note: this API is experimental and is not intended to be used at this stage**
 
 * **GET** ***/whoami/endpoints***
 
@@ -399,50 +375,50 @@ It offers different API endpoints to configure and control the aggregator as wel
         [
           {
            "location": "location_1",
+           "server": "location_1",
            "sfc": "sfc_1",
-           "sfc_i": "sfc_i_1",
-           "sf": "sf_1",
-           "sf_i": "sf_i_1",
-           "sf_endpoint": "sf_endpoint_1",
-           "sr": "sr_1"
-            },
+           "sfc_instance": "sfc_i_1",
+           "sf_package": "sf_1",
+           "sf": "sf_i_1",
+           "sf_endpoint": "sf_endpoint_1"
+           },
           {
            "location": "location_2",
+           "server": "location_2",
            "sfc": "sfc_2",
-           "sfc_i": "sfc_i_2",
-           "sf": "sf_2",
-           "sf_i": "sf_i_2",
-           "sf_endpoint": "sf_endpoint_2",
-           "sr": "sr_2"
-            }
+           "sfc_instance": "sfc_i_2",
+           "sf_package": "sf_2",
+           "sf": "sf_i_2",
+           "sf_endpoint": "sf_endpoint_2"
+           }
         ]
         ```
 
-* **GET** ***/whoami/endpoints/instance?sr={sr_id}&sf_i={sf_instance_id}&sf_endpoint={sf_endpoint_id}***
+* **GET** ***/whoami/endpoints/instance?sf_endpoint={sf_endpoint_id}***
 
-    This API method retrieves the uniquely defined service function endpoint configuration associated with the given URL parameters - sr, sf_i and sf_endpoint.
+    This API method retrieves the uniquely defined service function endpoint configuration associated with the given URL parameter - sf_endpoint.
 
     * Response:
 
         Returns a JSON-formatted response - a JSON object representing the service function endpoint configuration if it exists.
 
-        Returns a 404 Not Found error if there is no service function endpoint configuration associated with the given URL parameters.
+        Returns a 404 Not Found error if there is no service function endpoint configuration associated with the given URL parameter.
 
-        Returns a 400 Bad Request error if the url parameters are invalid.
+        Returns a 400 Bad Request error if the url parameter is invalid or missing.
 
     * Response Body Example:
 
-        - Request made to /whoami/endpoints/instance?sr=sr_1&sf_i=sf_i_1&sf_endpoint=sf_endpoint_1
+        - Request made to /whoami/endpoints/instance?sf_endpoint=sf_endpoint_1
         ```json
-        {
-          "location": "location_1",
-          "sfc": "sfc_1",
-          "sfc_i": "sfc_i_1",
-          "sf": "sf_1",
-          "sf_i": "sf_i_1",
-          "sf_endpoint": "sf_endpoint_1",
-          "sr": "sr_1"
-           }
+          {
+           "location": "location_1",
+           "server": "location_1",
+           "sfc": "sfc_1",
+           "sfc_instance": "sfc_i_1",
+           "sf_package": "sf_1",
+           "sf": "sf_i_1",
+           "sf_endpoint": "sf_endpoint_1"
+          }
         ```
 
 * **POST** ***/whoami/endpoints***
@@ -455,17 +431,17 @@ It offers different API endpoints to configure and control the aggregator as wel
 
     * Request Body Example:
 
-    ```json
-        {
-          "location": "location_1",
-          "sfc": "sfc_1",
-          "sfc_i": "sfc_i_1",
-          "sf": "sf_1",
-          "sf_i": "sf_i_1",
-          "sf_endpoint": "sf_endpoint_1",
-          "sr": "sr_1"
-           }
-     ```
+        ```json
+          {
+           "location": "location_1",
+           "server": "location_1",
+           "sfc": "sfc_1",
+           "sfc_instance": "sfc_i_1",
+           "sf_package": "sf_1",
+           "sf": "sf_i_1",
+           "sf_endpoint": "sf_endpoint_1"
+          }
+        ```
 
     * Response
 
@@ -473,25 +449,25 @@ It offers different API endpoints to configure and control the aggregator as wel
 
         Returns a 400 Bad Request error if the request body is invalid.
 
-        Returns a 409 Conflict error if there exists another service function endpoint configuration with the same 'sr', 'sf_i' and 'sf_endpoint' values.
+        Returns a 409 Conflict error if there exists another service function endpoint configuration with the same 'sf_endpoint' ID.
 
     * Response Body Example:
 
-    ```json
-        {
-          "location": "location_1",
-          "sfc": "sfc_1",
-          "sfc_i": "sfc_i_1",
-          "sf": "sf_1",
-          "sf_i": "sf_i_1",
-          "sf_endpoint": "sf_endpoint_1",
-          "sr": "sr_1"
-           }
-     ```
+        ```json
+          {
+           "location": "location_1",
+           "server": "location_1",
+           "sfc": "sfc_1",
+           "sfc_instance": "sfc_i_1",
+           "sf_package": "sf_1",
+           "sf": "sf_i_1",
+           "sf_endpoint": "sf_endpoint_1"
+          }
+        ```
 
-* **PUT** ***/whoami/endpoints/instance?sr={sr_id}&sf_i={sf_instance_id}&sf_endpoint={sf_endpoint_id}***
+* **PUT** ***/whoami/endpoints/instance?sf_endpoint={sf_endpoint_id}***
 
-    This API method replaces the uniquely defined service function endpoint configuration associated with the given URL parameters - sr, sf_i and sf_endpoint with a new service
+    This API method replaces the uniquely defined service function endpoint configuration associated with the given URL parameter - sf_endpoint, with a new service
     function endpoint configuration given in the request body (JSON format). It can also be used for updating.
 
     * Request:
@@ -500,17 +476,17 @@ It offers different API endpoints to configure and control the aggregator as wel
 
     * Request Body Example:
 
-    ```json
-        {
-          "location": "location_2",
-          "sfc": "sfc_1",
-          "sfc_i": "sfc_i_1",
-          "sf": "sf_1",
-          "sf_i": "sf_i_1",
-          "sf_endpoint": "sf_endpoint_1",
-          "sr": "sr_1"
-           }
-     ```
+        ```json
+          {
+           "location": "location_2",
+           "server": "location_2",
+           "sfc": "sfc_1",
+           "sfc_instance": "sfc_i_1",
+           "sf_package": "sf_1",
+           "sf": "sf_i_1",
+           "sf_endpoint": "sf_endpoint_1"
+          }
+        ```
 
     * Response
 
@@ -518,105 +494,52 @@ It offers different API endpoints to configure and control the aggregator as wel
 
         Returns a 400 Bad Request error if the request body is invalid.
 
-        Returns a 400 Bad Request error if the url parameters are invalid.
+        Returns a 400 Bad Request error if the url parameter is invalid.
 
-        Returns an 404 Not Found error if there is no service function endpoint configuration associated with the given URL parameters.
+        Returns an 404 Not Found error if there is no service function endpoint configuration associated with the given URL parameter.
 
-        Returns a 409 Conflict error if there exists another service function endpoint configuration with the same 'sr', 'sf_i' and 'sf_endpoint' values as the ones in the request body.
+        Returns a 409 Conflict error if there exists another service function endpoint configuration with the same 'sf_endpoint' ID as the one in the request body.
 
     * Response Body Example:
 
-        - Request made to /whoami/endpoints/instance?sr=sr_1&sf_i=sf_i_1&sf_endpoint=sf_endpoint_1
+        - Request made to /whoami/endpoints/instance?sf_endpoint=sf_endpoint_1
+
         ```json
-            {
-              "location": "location_2",
-              "sfc": "sfc_1",
-              "sfc_i": "sfc_i_1",
-              "sf": "sf_1",
-              "sf_i": "sf_i_1",
-              "sf_endpoint": "sf_endpoint_1",
-              "sr": "sr_1"
-               }
-         ```
+          {
+           "location": "location_2",
+           "server": "location_2",
+           "sfc": "sfc_1",
+           "sfc_instance": "sfc_i_1",
+           "sf_package": "sf_1",
+           "sf": "sf_i_1",
+           "sf_endpoint": "sf_endpoint_1"
+          }
+        ```
 
-* **DELETE** ***/whoami/endpoints/instance?sr={sr_id}&sf_i={sf_instance_id}&sf_endpoint={sf_endpoint_id}***
+* **DELETE** ***/whoami/endpoints/instance?sf_endpoint={sf_endpoint_id}***
 
-    This API method deletes the uniquely defined service function endpoint configuration associated with the given URL parameters - sr, sf_i and sf_endpoint.
+    This API method deletes the uniquely defined service function endpoint configuration associated with the given URL parameter - sf_endpoint.
 
     * Response:
 
         Returns the JSON representation of the deleted object.
 
-        Returns an 404 Not Found error if there is no service function endpoint configuration associated with the given URL parameters.
+        Returns an 404 Not Found error if there is no service function endpoint configuration associated with the given URL parameter.
 
-        Returns a 400 Bad Request error if the url parameters are invalid.
+        Returns a 400 Bad Request error if the url parameter is invalid.
 
     * Response Body Example:
 
-        - Request made to /whoami/endpoints/instance?sr=sr_1&sf_i=sf_i_1&sf_endpoint=sf_endpoint_1
+        - Request made to /whoami/endpoints/instance?sf_endpoint=sf_endpoint_1
+
         ```json
-         {
-          "location": "location_1",
-          "sfc": "sfc_1",
-          "sfc_i": "sfc_i_1",
-          "sf": "sf_1",
-          "sf_i": "sf_i_1",
-          "sf_endpoint": "sf_endpoint_1",
-          "sr": "sr_1"
-           }
+          {
+           "location": "location_1",
+           "server": "location_1",
+           "sfc": "sfc_1",
+           "sfc_instance": "sfc_i_1",
+           "sf_package": "sf_1",
+           "sf": "sf_i_1",
+           "sf_endpoint": "sf_endpoint_1"
+          }
         ```
-
-## Installing and running the CLMC service
-
-Before installing the CLMC service and its dependencies, it is recommended to use a python virtual environment. To easily
-manage virtual environments, **virtualenvwrapper** can be used.
-
-```shell
-pip3 install virtualenvwrapper
-```
-
-To create a virtual environment use the **mkvirtualenv** command:
-
-```shell
-mkvirtualenv CLMC
-```
-
-When created, you should already be set to use the new virtual environment, but to make sure of this use the **workon** command:
-
-```shell
-workon CLMC
-```
-
-Now, any installed libraries will be installed relative to this environment only.
-
-The easiest way to install and use the CLMC service locally is to use **pip**. Navigate to the clmc-service folder:
-
-```shell
-cd src/service
-```
-
-Test the CLMC service using **tox** along with the ***tox.ini*** configuration file. If tox is not installed run:
-
-```shell
-pip3 install tox
-```
-
-After it is installed, simply use the **tox** command:
-
-```shell
-tox
-```
-
-Then install the service.
-
-```shell
-pip3 install .
-```
-
-Finally, start the service on localhost by using pyramid's **pserve** command line utility:
-
-```shell
-pserve production.ini
-```
-
-You should now be able to make requests to the CLMC service on the various API endpoints.
diff --git a/src/test/clmctest/dashboards/README.md b/docs/dashboards.md
similarity index 74%
rename from src/test/clmctest/dashboards/README.md
rename to docs/dashboards.md
index cb5454cc812b7f2d1a958b95b4b7aeabbb74c89e..2f4f8dd803a4907f47cf39ef2c3abcdbf3132db1 100644
--- a/src/test/clmctest/dashboards/README.md
+++ b/docs/dashboards.md
@@ -1,6 +1,6 @@
 # Sample Chronograf Dashboards
 
-This folder contains several sample dashboards for use in Chronograf.
+The folder located at src/test/clmctest/dashboards contains several sample dashboards for use in Chronograf.
 
 ## Loading a dashboard
 
@@ -22,11 +22,11 @@ The pipe into the `jq` command is not strictly necessary, it is there to pretty-
 
 ## Overview
 
-### dc_dash.json
+### src/test/clmctest/dashboards/dc_dash.json
 
 Displays the average CPU usage over time for an entire data centre. It has a dashboard variable for the `location` field.
 
-### sf_dash.json
+### src/test/clmctest/dashboards/sf_dash.json
 
 The service function dashboard has two dashboard variables to choose two different service functions to display side by side (left and right column).
 
@@ -35,29 +35,29 @@ Each column displays the total network traffic sent and received in MB over time
 To get the top chart, a nested select statement is used:
 
 ```sql
-select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last("bytes_recv") / 1048576 AS "RX_MB", last("bytes_sent") / 1048576 AS "TX_MB" FROM "MSDemo"."autogen"."net" WHERE time > :dashboardTime: AND "sf"=:sf1: GROUP BY time(1m), ipendpoint FILL(null)) group by time(1m)
+select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last("bytes_recv") / 1048576 AS "RX_MB", last("bytes_sent") / 1048576 AS "TX_MB" FROM "MSDemo"."autogen"."net" WHERE time > :dashboardTime: AND "flame_sfp"=:sf1: GROUP BY time(1m), "flame_sfe" FILL(null)) group by time(1m)
 ```
 
 (The constant 1048576 is 1024*1024)
 
-The inner select groups by ipendpoint and time, taking the maximum value in each time period for each ipendpoint and the outer select queries over the result of the inner select but then groups only by time.
+The inner select groups by service function endpoint (flame_sfe) and time, taking the last value reported in each time period for each endpoint; the outer select queries over the result of the inner select but groups only by time.
 
 The derivative of the first chart requires a further nested select:
 
 ```sql
-select derivative(total_RX_MB, 1m) / 60 as RX_MB_per_s, derivative(total_TX_MB, 1m) / 60 as TX_MB_per_s from (select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last("bytes_recv") / 1048576 AS "RX_MB", last("bytes_sent") / 1048576 AS "TX_MB" FROM "MSDemo"."autogen"."net" WHERE time > :dashboardTime: AND "sf"=:sf1: GROUP BY time(1m), ipendpoint FILL(null)) group by time(1m))
+select derivative(total_RX_MB, 1m) / 60 as RX_MB_per_s, derivative(total_TX_MB, 1m) / 60 as TX_MB_per_s from (select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last("bytes_recv") / 1048576 AS "RX_MB", last("bytes_sent") / 1048576 AS "TX_MB" FROM "MSDemo"."autogen"."net" WHERE time > :dashboardTime: AND "flame_sfp"=:sf1: GROUP BY time(1m), "flame_sfe" FILL(null)) group by time(1m))
 ```
 
 The outer-most select taakes the derivative of the first chart for each data set separately. The derivative function is parameterised to understand it is over a 1m period but then the result is divided by 60 to give an average MB/s in each 1 minute period.
 
-### minio_dash.json
+### src/test/clmctest/dashboards/minio_dash.json
 
-The minio dashboard has two dashboard variables to choose two different ipendpoints to display side by side (left and right column). Minio endpoints must be chosen for all features to work.
+The minio dashboard has two dashboard variables to choose two different service function endpoints to display side by side (left and right column). Minio endpoints must be chosen for all features to work.
 
 The top chart shows the percentage of requests served within each of a fixed set of response-time thresholds. This performance metric would highlight a service endpoint that is struggling to meet demand.
 
 The other charts show network traffic using a similar formulation to the `sf_dash` described above.
 
-### nginx_dash.json
+### src/test/clmctest/dashboards/nginx_dash.json
 
 There are no dashboard variables on this dashboard: it is hard-coded to show the `nginx_1_ep1` and `nginx_1_ep2` endpoints. Various charts (network, CPU, responses per second) are displayed.
diff --git a/docs/image/CLMC-Notifications-SequenceDiagram.png b/docs/image/CLMC-Notifications-SequenceDiagram.png
new file mode 100644
index 0000000000000000000000000000000000000000..f8bbd48943f96aafe61241068c893b72dadcca1b
Binary files /dev/null and b/docs/image/CLMC-Notifications-SequenceDiagram.png differ
diff --git a/docs/monitoring.md b/docs/monitoring.md
index f1104ea6601edc279ba7a8d6d3c83cbe6ae1003a..fe1bd9ed2d9f8aab3666b65731f21e873d51006c 100644
--- a/docs/monitoring.md
+++ b/docs/monitoring.md
@@ -184,13 +184,13 @@ The measurement model considers three monitoring views on a surrogate with field
 
 All of the measurements on a surrogate share a common context that includes tag values:
 
-* sfc – an orchestration template
-* sfc_i – an instance of the orchestration template
-* sf – a SF type
-* sf_i – an instance of the SF type
-* surrogate – an authoritive copy of the SF instance either VM or container
-* server – a physical or virtual server for hosting VM or container instances
-* location – the location of the server
+* flame_sfc – an orchestration template (a.k.a. Service Function Chain)
+* flame_sfci – an instance of the orchestration template (a.k.a. Service Function Chain Instance)
+* flame_sfp – the package a service function is using (a.k.a. Service Function Package)
+* flame_sf – the service function defined in the TOSCA resource specification (a.k.a. Service Function)
+* flame_sfe – an authoritative copy of the SF - either VM or container (a.k.a. Service Function Endpoint)
+* flame_server – a cluster VM inside which service function endpoints are placed (at the current stage, the value of this tag is the same as the *flame_location* tag)
+* flame_location – the location of the server - physical machine that hosts the cluster VM
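+
+As an illustration only (the measurement and field names below are placeholders, not part of the CLMC schema), these common tags allow queries such as filtering by chain instance and grouping by endpoint:
+
+```
+select mean("usage_percent") from "cpu_usage" where "flame_sfci" = '<sfc_instance_id>' group by "flame_sfe"
+```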
 
 By including this context with service, network and host measurements it is possible to support range of temporal queries associated with SFC’s. By adopting the same convention for identifiers it is possible to combine measurements across service, network and host to create new series that allows exploration of different aspects of the VM instance, including cross-layer queries.
 
@@ -426,7 +426,7 @@ The following queries illustrate how to calculate _mean time between failures_ (
 _Q. What is the Mean Time Between Failures (MTBF) of endpoint 'adaptive_streaming_I1_apache1'?_
 
 ```
-select mean(connected_mst) as "apache1_MTBF(s)" from "endpoint_config" where connected_mst <> 0 and ipendpoint = 'adaptive_streaming_I1_apache1'
+select mean(connected_mst) as "apache1_MTBF(s)" from "endpoint_config" where connected_mst <> 0 and "flame_sfe" = 'adaptive_streaming_I1_apache1'
 ```
 
 ```
@@ -439,12 +439,12 @@ time apache1_MTBF(s)
 _Q. What is the Mean Down Time (MDT) of endpoint 'adaptive_streaming_I1_apache1'?_
 
 ```
-select mean(unplaced_mst) as "unplaced_mdt" into "endpoint_config_mdt" from "endpoint_config" where unplaced_mst <> 0 and ipendpoint = 'adaptive_streaming_I1_apache1'
-select mean(placing_mst) as "placing_mdt" into "endpoint_config_mdt" from "endpoint_config" where placing_mst <> 0 and ipendpoint = 'adaptive_streaming_I1_apache1'
-select mean(placed_mst) as "placed_mdt" into "endpoint_config_mdt" from "endpoint_config" where placed_mst <> 0 and ipendpoint = 'adaptive_streaming_I1_apache1'
-select mean(booting_mst) as "booting_mdt" into "endpoint_config_mdt" from "endpoint_config" where booting_mst <> 0 and ipendpoint = 'adaptive_streaming_I1_apache1'
-select mean(booted_mst) as "booted_mdt" into "endpoint_config_mdt" from "endpoint_config" where booted_mst <> 0 and ipendpoint = 'adaptive_streaming_I1_apache1'
-select mean(connecting_mst) as "connecting_mdt" into "endpoint_config_mdt" from "endpoint_config" where connecting_mst <> 0 and ipendpoint = 'adaptive_streaming_I1_apache1'
+select mean(unplaced_mst) as "unplaced_mdt" into "endpoint_config_mdt" from "endpoint_config" where unplaced_mst <> 0 and "flame_sfe" = 'adaptive_streaming_I1_apache1'
+select mean(placing_mst) as "placing_mdt" into "endpoint_config_mdt" from "endpoint_config" where placing_mst <> 0 and "flame_sfe" = 'adaptive_streaming_I1_apache1'
+select mean(placed_mst) as "placed_mdt" into "endpoint_config_mdt" from "endpoint_config" where placed_mst <> 0 and "flame_sfe" = 'adaptive_streaming_I1_apache1'
+select mean(booting_mst) as "booting_mdt" into "endpoint_config_mdt" from "endpoint_config" where booting_mst <> 0 and "flame_sfe" = 'adaptive_streaming_I1_apache1'
+select mean(booted_mst) as "booted_mdt" into "endpoint_config_mdt" from "endpoint_config" where booted_mst <> 0 and "flame_sfe" = 'adaptive_streaming_I1_apache1'
+select mean(connecting_mst) as "connecting_mdt" into "endpoint_config_mdt" from "endpoint_config" where connecting_mst <> 0 and "flame_sfe" = 'adaptive_streaming_I1_apache1'
 select (unplaced_mdt + placing_mdt + placed_mdt + booting_mdt + booted_mdt + connecting_mdt) as "MDT(s)" from "endpoint_config_mdt"
 ```
 
@@ -536,13 +536,13 @@ ipendpoint measurements measure the configuration, usage and performance of VM/C
 
 Common tags
 
-* location – a physical or virtual server for hosting nodes instances
-* server – the location of the server
-* sfc – an orchestration template
-* sfc_i – an instance of the orchestration template
-* sf – a SF package identifier indicating the type and version of SF
-* sf_i – an instance of the SF type
-* ipendpoint – an authoritive copy of the SF instance either a container or VM
+* flame_sfc – an orchestration template (a.k.a. Service Function Chain)
+* flame_sfci – an instance of the orchestration template (a.k.a. Service Function Chain Instance)
+* flame_sfp – the package a service function is using (a.k.a. Service Function Package)
+* flame_sf – the service function defined in the TOSCA resource specification (a.k.a. Service Function)
+* flame_sfe – an authoritative copy of the SF - either VM or container (a.k.a. Service Function Endpoint)
+* flame_server – a cluster VM inside which service function endpoints are placed (at the current stage, the value of this tag is the same as the *flame_location* tag)
+* flame_location – the location of the server - physical machine that hosts the cluster VM
 
 #### ipendpoint Measurements
 
diff --git a/docs/total-service-request-delay.md b/docs/total-service-request-delay.md
index a27f47665f3f77341193feff8f74f51e89f5bb3b..66aa241937b42383005b74eb925d4620b1cc08ab 100644
--- a/docs/total-service-request-delay.md
+++ b/docs/total-service-request-delay.md
@@ -1,3 +1,27 @@
+<!--
+// © University of Southampton IT Innovation Centre, 2017
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road, 
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Stephen Phillips
+//      Created Date :          23-05-2018
+//      Created for Project :   FLAME
+-->
+
+
 # Round Trip Time of a Service Request
 
 The Round Trip Time (RTT) of a network is the time taken from sending a packet to receiving the acknowledgement. We are also interested in factoring in the size of the data being sent over the network and the delay caused by the service processing the request.
diff --git a/scripts/clmc-agent/configure.sh b/scripts/clmc-agent/configure.sh
index 9f98a51303b1e363a9dbf125c6c775742cf82849..13a87bf9e794a017c062c74a29718459219afab0 100755
--- a/scripts/clmc-agent/configure.sh
+++ b/scripts/clmc-agent/configure.sh
@@ -20,6 +20,8 @@
 #//
 #//      Created By :            Michael Boniface
 #//      Created Date :          13/12/2017
+#//      Updated By :            Nikolay Stanchev
+#//      Updated Date :          30/08/2018
 #//      Created for Project :   FLAME
 #//
 #/////////////////////////////////////////////////////////////////////////
@@ -30,21 +32,21 @@ set -euo pipefail
 echo "Configuring Telegraf agent general and output configuration"
 
 # Get command line parameters
-if [ "$#" -ne 9 ]; then
+if [ "$#" -ne 7 ]; then
     echo "Error: illegal number of arguments: "$#
-    echo "Usage: configure.sh LOCATION SFC_ID SFC_ID_INSTANCE SF_ID SF_ID_INSTANCE IP_ENDPOINT_ID SR_ID INFLUXDB_URL DATABASE_NAME"
+    echo "Usage: configure.sh LOCATION SFC_ID SFC_INSTANCE_ID SF_PACKAGE_ID SF_ID SF_ENDPOINT_ID INFLUXDB_URL"
     exit 1 
 fi
 
 LOCATION=$1
+SERVER=${LOCATION}
 SFC_ID=$2
-SFC_ID_INSTANCE=$3
-SF_ID=$4
-SF_ID_INSTANCE=$5
-IP_ENDPOINT_ID=$6
-SR_ID=$7
-INFLUXDB_URL=$8
-DATABASE_NAME=$9
+SFC_INSTANCE_ID=$3
+SF_PACKAGE_ID=$4
+SF_ID=$5
+SF_ENDPOINT_ID=$6
+INFLUXDB_URL=$7
+DATABASE_NAME=${SFC_ID}  # DATABASE IS NAMED AFTER SFC ID
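+
+# Illustrative example invocation (all values below are placeholders, not real deployment values):
+#   ./configure.sh DC1 media-sfc media-sfc-instance apache apache_1 apache_1_ep1 http://172.40.231.51:8086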
 
 TELEGRAF_CONF_DIR="/etc/telegraf"
 TELEGRAF_CONF_FILE=${TELEGRAF_CONF_DIR}"/telegraf.conf"
@@ -56,11 +58,10 @@ TELEGRAF_OUTPUT_CONF_FILE=${TELEGRAF_INCLUDE_CONF_DIR}"/telegraf_output.conf"
 # Replace template parameters on general configuration
 sed -i 's/${LOCATION}/'${LOCATION}'/g' ${TELEGRAF_CONF_FILE}
 sed -i 's/${SFC_ID}/'${SFC_ID}'/g' ${TELEGRAF_CONF_FILE}
-sed -i 's/${SFC_ID_INSTANCE}/'${SFC_ID_INSTANCE}'/g' ${TELEGRAF_CONF_FILE}
+sed -i 's/${SFC_INSTANCE_ID}/'${SFC_INSTANCE_ID}'/g' ${TELEGRAF_CONF_FILE}
+sed -i 's/${SF_PACKAGE_ID}/'${SF_PACKAGE_ID}'/g' ${TELEGRAF_CONF_FILE}
 sed -i 's/${SF_ID}/'${SF_ID}'/g' ${TELEGRAF_CONF_FILE}
-sed -i 's/${SF_ID_INSTANCE}/'${SF_ID_INSTANCE}'/g' ${TELEGRAF_CONF_FILE}
-sed -i 's/${IP_ENDPOINT_ID}/'${IP_ENDPOINT_ID}'/g' ${TELEGRAF_CONF_FILE}
-sed -i 's/${SR_ID}/'${SR_ID}'/g' ${TELEGRAF_CONF_FILE}
+sed -i 's/${SF_ENDPOINT_ID}/'${SF_ENDPOINT_ID}'/g' ${TELEGRAF_CONF_FILE}
 
 echo "Telegraf Output Configuration File: ${TELEGRAF_OUTPUT_CONF_FILE}"
 
diff --git a/scripts/clmc-agent/telegraf.conf b/scripts/clmc-agent/telegraf.conf
index 07ac73a4dcf6a067a099ce629d9fc4f81bfb1ed2..039502c4b469c2554dad3084222f83b88d4f757f 100644
--- a/scripts/clmc-agent/telegraf.conf
+++ b/scripts/clmc-agent/telegraf.conf
@@ -17,6 +17,8 @@
 ##
 ##      Created By :            Simon Crowle
 ##      Created Date :          03-01-2018
+##      Updated By :            Nikolay Stanchev
+##      Updated Date :          30-08-2018
 ##      Created for Project :   FLAME
 
 # Telegraf configuration
@@ -33,19 +35,19 @@
 # Global tags can be specified here in key="value" format.
 [global_tags]
   # location of the data centre
-  location="${LOCATION}"
-  # media service template id
-  sfc="${SFC_ID}"
-  # media service instance
-  sfc_i="${SFC_ID_INSTANCE}"
-  # service function type
-  sf="${SF_ID}"
-  # service function instance id
-  sf_i="${SF_ID_INSTANCE}"
-  # ipendpoint id aka surrogate instance
-  ipendpoint="${IP_ENDPOINT_ID}"
-  # the service router providing access to the network
-  sr="${SR_ID}"
+  "flame_location"="${LOCATION}"
+  # hostname of cluster/server (potentially virtualized) that boots up service function containers
+  "flame_server" = "${LOCATION}"
+  # media service template id (defined in the TOSCA resource spec.)
+  "flame_sfc"="${SFC_ID}"
+  # media service instance id (defined in the TOSCA resource spec.)
+  "flame_sfci"="${SFC_INSTANCE_ID}"
+  # service function package id (packaging time)
+  "flame_sfp"="${SF_PACKAGE_ID}"
+  # service function id (defined in the TOSCA resource spec.)
+  "flame_sf"="${SF_ID}"
+  # service function IP endpoint id aka surrogate instance
+  "flame_sfe"="${SF_ENDPOINT_ID}"
 
 # Configuration for telegraf agent
 [agent]
diff --git a/scripts/clmc-agent/telegraf_output.conf b/scripts/clmc-agent/telegraf_output.conf
index 7521ac34a05a0c8f7ccdc166a772fbd2d0ee5544..734d93c965bb0919de37da55f4f06c3cad6398c1 100644
--- a/scripts/clmc-agent/telegraf_output.conf
+++ b/scripts/clmc-agent/telegraf_output.conf
@@ -17,6 +17,8 @@
 ##
 ##      Created By :            Michael Boniface
 ##      Created Date :          08-03-2018
+##      Updated By :            Nikolay Stanchev
+##      Updated Date :          30-08-2018
 ##      Created for Project :   FLAME
 
 ###############################################################################
@@ -31,7 +33,7 @@
   # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
   urls = ["${INFLUXDB_URL}"] # required
   # The target database for metrics (telegraf will create it if not exists)
-  database = "${DATABASE_NAME}" # required
+  database = "${DATABASE_NAME}" # required - MUST BE NAMED AFTER THE SERVICE FUNCTION CHAIN ID
   # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
   # note: using second precision greatly helps InfluxDB compression
   precision = "s"
diff --git a/scripts/clmc-service/install-clmc-service.sh b/scripts/clmc-service/install-clmc-service.sh
index 75a6453bbfb59af31ad783d215f090a25ab6b489..c6ba51b259638e1ec105ec3418535bc79038aa27 100755
--- a/scripts/clmc-service/install-clmc-service.sh
+++ b/scripts/clmc-service/install-clmc-service.sh
@@ -1,16 +1,5 @@
 #!/bin/bash
 
-# Get command line parameters
-if [ "$#" -ne 3 ]; then
-    echo "Error: illegal number of arguments: "$#
-    echo "Usage: install-clmc-service.sh INFLUX_URL DATABASE_NAME REPORT_PERIOD"
-    exit 1 
-fi
-
-INFLUX_URL=$1
-DATABASE_NAME=$2
-REPORT_PERIOD=$3
-
 apt-get update
 apt-get install libssl-dev -y
 
@@ -107,7 +96,7 @@ if [ $? -ne 0 ] ; then
 		exit 1
 fi
 
-# Install minioclmc as systemctl service
+# Install clmc as a systemd service
 # -----------------------------------------------------------------------
 mkdir -p /opt/flame/clmc
 start_script_file="/opt/flame/clmc/start.sh"
@@ -142,12 +131,7 @@ do
   sleep 5
 done
 
-# configure the CLMC service
-JSON="{\"aggregator_report_period\": ${REPORT_PERIOD}, \"aggregator_database_name\": \"${DATABASE_NAME}\", \"aggregator_database_url\": \"${INFLUX_URL}\"}"
-echo "CONFIG JSON=${JSON}"
-curl -H 'Content-Type: application/json' -X PUT -d "${JSON}" http://localhost:9080/aggregator/config
-
-# start the aggregator
-JSON="{\"action\": \"start\"}"
-echo "START ACTION JSON=${JSON}"
-curl -H 'Content-Type: application/json' -X PUT -d "${JSON}" http://localhost:9080/aggregator/control
\ No newline at end of file
+# install and start nginx
+apt-get install nginx -y
+cp ${REPO_ROOT}/scripts/clmc-service/nginx.conf /etc/nginx/nginx.conf
+systemctl restart nginx  # nginx is started automatically on installation; it must be restarted to pick up the new configuration
\ No newline at end of file
diff --git a/scripts/clmc-service/install-tick-stack.sh b/scripts/clmc-service/install-tick-stack.sh
index d2f639d68218b22e69d9c6bea6b9503770ed1706..2ca0c5464429309afbad3f6ea429dfe6e5cdeb4a 100755
--- a/scripts/clmc-service/install-tick-stack.sh
+++ b/scripts/clmc-service/install-tick-stack.sh
@@ -45,7 +45,27 @@ if [ $? == 1 ]; then
 fi
 dpkg -i chronograf_${CHRONOGRAF_VERSION}_amd64.deb
 
+# configure Chronograf systemd service file
+sed -i 's/^ExecStart.*/ExecStart=\/usr\/bin\/chronograf --basepath=\/chronograf --prefix-routes $CHRONOGRAF_OPTS/' /lib/systemd/system/chronograf.service
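+# the sed above rewrites the ExecStart line to:
+#   ExecStart=/usr/bin/chronograf --basepath=/chronograf --prefix-routes $CHRONOGRAF_OPTS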
+systemctl daemon-reload
+
 systemctl start influxdb
 systemctl start kapacitor
-systemctl start chronograf
+systemctl restart chronograf
+
+# wait for kapacitor to start
+while ! nc -z localhost 9092
+do
+  echo "Waiting for kapacitor port 9092 to be ready on localhost..."
+  sleep 1
+done
+
+# define the TICK script templates within Kapacitor
+echo "----> Creating alert templates in Kapacitor"
+cd ${REPO_ROOT}/src/service/resources/TICKscript
+for template_file in *.tick; do
+    template_id=$(basename ${template_file} .tick)
+    echo ${template_id} ${template_file}
+    kapacitor define-template ${template_id} -tick ${template_file}
+done
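+
+# the created templates can be listed for verification with: kapacitor list templates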
 
diff --git a/scripts/clmc-service/install.sh b/scripts/clmc-service/install.sh
index 420dddab39d17444c001114ee0427b7167e4b60d..90039438a70ee2c502a3d335f9174f80be87cdf9 100755
--- a/scripts/clmc-service/install.sh
+++ b/scripts/clmc-service/install.sh
@@ -27,18 +27,11 @@
 # Force fail on command fail (off for now as virtualenvwrapper install fails)
 set -euo pipefail
 
-# Get command line parameters
-if [ "$#" -ne 3 ]; then
-    echo "Error: illegal number of arguments: "$#
-    echo "Usage: install.sh INFLUX_URL DATABASE_NAME REPORT_PERIOD"
-    exit 1 
-fi
-
 # Ensure everything runs in directory of the parent script
 cd `dirname $0`
 
 echo "Provisioning CLMC service"
 
-./install-tick-stack.sh $@
-./install-neo4j.sh $@
-./install-clmc-service.sh $@
\ No newline at end of file
+./install-tick-stack.sh
+./install-neo4j.sh
+./install-clmc-service.sh
\ No newline at end of file
diff --git a/scripts/clmc-service/nginx.conf b/scripts/clmc-service/nginx.conf
new file mode 100644
index 0000000000000000000000000000000000000000..3cb407fac3096d2683aa50f5a1b60f32ccaa8342
--- /dev/null
+++ b/scripts/clmc-service/nginx.conf
@@ -0,0 +1,67 @@
+user www-data;
+worker_processes auto;
+pid /run/nginx.pid;
+
+events {
+    worker_connections 1024;
+}
+
+http {
+
+    sendfile on;
+    tcp_nopush on;
+    tcp_nodelay on;
+    keepalive_timeout 65;
+    types_hash_max_size 2048;
+
+    include /etc/nginx/mime.types;
+    default_type application/octet-stream;
+
+    access_log /var/log/nginx/access.log;
+    error_log /var/log/nginx/error.log;
+
+    server {
+        listen 80;
+
+        location /kapacitor {
+            proxy_pass http://127.0.0.1:9092; # N.B. No URI in the URL, passes the whole location preserving the prefix
+            proxy_redirect off;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header X-Forwarded-Host $server_name;
+        }
+        location /clmc-service/ {
+            proxy_pass http://127.0.0.1:9080/;
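+            # the trailing slash in proxy_pass replaces the matched /clmc-service/ prefix with / before forwarding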
+            proxy_redirect off;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header X-Forwarded-Host $server_name;
+        }
+        location /influxdb/ {
+            proxy_pass http://127.0.0.1:8086/;
+            proxy_redirect off;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header X-Forwarded-Host $server_name;
+        }
+        location /neo4j/ {
+            proxy_pass http://127.0.0.1:7474/;
+            proxy_redirect off;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header X-Forwarded-Host $server_name;
+        }
+        location /chronograf {
+            proxy_pass http://127.0.0.1:8888; # No trailing slash - chronograf is configured to include /chronograf in its routes
+            proxy_redirect off;
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header X-Forwarded-Host $server_name;
+        }
+    }
+}
\ No newline at end of file
diff --git a/scripts/test/fixture.sh b/scripts/test/fixture.sh
index 0aa2f4c0d55663007a87daa01e5f8fd7a5783057..725e21d8e58a2df1f129c3c7e0a606d595d0d4ac 100755
--- a/scripts/test/fixture.sh
+++ b/scripts/test/fixture.sh
@@ -50,10 +50,7 @@ create() {
         # provision software into each container
         echo "Provisioning: ${service_name}"
         if [ ${service_name} == "clmc-service" ]; then
-            influxdb_url=$(echo $SERVICE | jq -r '.influxdb_url')
-            database_name=$(echo $SERVICE | jq -r '.database_name')
-            report_period=$(echo $SERVICE | jq -r '.report_period')
-            cmd="/vagrant/scripts/clmc-service/install.sh ${influxdb_url} ${database_name} ${report_period}"
+            cmd="/vagrant/scripts/clmc-service/install.sh"
             echo "Provisioning command ${cmd}"
             lxc exec ${service_name} --env REPO_ROOT="/vagrant" -- ${cmd}
             exit_code=$?
@@ -73,21 +70,19 @@ create() {
         else
             # get container parameters
             location=$(echo $SERVICE | jq -r '.location')
+            sf_package_id=$(echo $SERVICE | jq -r '.sf_package_id')
             sf_id=$(echo $SERVICE | jq -r '.sf_id')
-            sf_id_instance=$(echo $SERVICE | jq -r '.sf_id_instance')
             sfc_id=$(echo $SERVICE | jq -r '.sfc_id')
-            sfc_id_instance=$(echo $SERVICE | jq -r '.sfc_id_instance')
-            sr_id=$(echo $SERVICE | jq -r '.sr_id')
-            ipendpoint_id=$(echo $SERVICE | jq -r '.ipendpoint_id')
+            sfc_instance_id=$(echo $SERVICE | jq -r '.sfc_instance_id')
+            sf_endpoint_id=$(echo $SERVICE | jq -r '.sf_endpoint_id')
             influxdb_url=$(echo $SERVICE | jq -r '.influxdb_url')
-            database_name=$(echo $SERVICE | jq -r '.database_name')
 
             # install service function specific software
-            cmd=/vagrant/src/test/clmctest/services/${sf_id}/install.sh
+            cmd=/vagrant/src/test/clmctest/services/${sf_package_id}/install.sh
             lxc exec ${service_name} --env REPO_ROOT="/vagrant" -- ${cmd}
             exit_code=$?
             if [ $exit_code != 0 ]; then
-                echo "${sf_id} installation failed with exit code ${exit_code}"
+                echo "${sf_package_id} installation failed with exit code ${exit_code}"
                 exit 1
             fi
             # install telegraf
@@ -109,11 +104,11 @@ create() {
             # copy the 'host' config into all service containers
             cp -f ${repo_root}/src/test/clmctest/services/host/telegraf*.conf ${container_dir}/etc/telegraf/telegraf.d/
             # copy the service-specific config
-            cp -f ${repo_root}/src/test/clmctest/services/${sf_id}/telegraf*.conf ${container_dir}/etc/telegraf/telegraf.d/
+            cp -f ${repo_root}/src/test/clmctest/services/${sf_package_id}/telegraf*.conf ${container_dir}/etc/telegraf/telegraf.d/
             chown -R 100000:100000 ${container_dir}/etc/telegraf/
 
             # replace telegraf template with container parameters
-            cmd="/vagrant/scripts/clmc-agent/configure.sh ${location} ${sfc_id} ${sfc_id_instance} ${sf_id} ${sf_id_instance} ${ipendpoint_id} ${sr_id} ${influxdb_url} ${database_name}"
+            cmd="/vagrant/scripts/clmc-agent/configure.sh ${location} ${sfc_id} ${sfc_instance_id} ${sf_package_id} ${sf_id} ${sf_endpoint_id} ${influxdb_url}"
             lxc exec ${service_name} -- ${cmd}
 
             # start telegraf
diff --git a/src/service/.coveragerc b/src/service/.coveragerc
index 9f2b9eaf4f0eb7178a1f8be6f9e3eeeb4c4ccbf4..1166225d2ef504a94bc5810480671027ba661dcc 100644
--- a/src/service/.coveragerc
+++ b/src/service/.coveragerc
@@ -1,9 +1,2 @@
 [run]
-source = clmcservice
-omit =
-        *test*
-        *__init__*
-        clmcservice\aggregation\influx_data_interface.py
-        clmcservice\configapi\views.py
-        clmcservice\whoamiapi\views.py
-# configapi\views and whoami\views are currently omitted since there is no implementation there, yet
\ No newline at end of file
+source = clmcservice
\ No newline at end of file
diff --git a/src/service/MANIFEST.in b/src/service/MANIFEST.in
index eaf16db0e28d54b95ae8f085c1d4c3cfa1b8278d..00dcd53d7ff08d0f1c34f3277e66aa82b23e6169 100644
--- a/src/service/MANIFEST.in
+++ b/src/service/MANIFEST.in
@@ -1,2 +1,6 @@
-include MANIFEST.in
-recursive-include clmcservice
\ No newline at end of file
+include VERSION
+include *coveragerc
+include *.ini
+include clmcservice/static/*
+include resources/GraphAPI/network_config.json
+include resources/TICKscript/*-template.tick
\ No newline at end of file
diff --git a/src/service/VERSION b/src/service/VERSION
new file mode 100644
index 0000000000000000000000000000000000000000..4a2bfa871aa7cbcb89e5d84bf7020312f591bb5d
--- /dev/null
+++ b/src/service/VERSION
@@ -0,0 +1 @@
+__version__ = "1.2.0"
\ No newline at end of file
diff --git a/src/service/clmcservice/__init__.py b/src/service/clmcservice/__init__.py
index ff5cee802accce0f0e9ac0e689abe2e9ac735a64..b710578a689fb7066c777e3143f8883c49e05bf1 100644
--- a/src/service/clmcservice/__init__.py
+++ b/src/service/clmcservice/__init__.py
@@ -22,12 +22,20 @@
 //      Created for Project :   FLAME
 """
 
-from os import path
+
+# Python standard libs
 from json import load
+from os.path import dirname, abspath
+
+# PIP installed libs
 from pyramid.config import Configurator
 from sqlalchemy import engine_from_config
+
+# CLMC-service imports
 from clmcservice.models.meta import DBSession, Base
-from clmcservice.aggregationapi.utilities import validate_conf_file, RUNNING_FLAG, MALFORMED_FLAG, CONF_FILE_ATTRIBUTE, CONF_OBJECT, AGGREGATOR_CONFIG_SECTION
+
+
+ROOT_DIR = dirname(abspath(__file__))  # get the path of the root package (clmcservice) as a global variable
 
 
 def main(global_config, **settings):
@@ -39,13 +47,8 @@ def main(global_config, **settings):
     DBSession.configure(bind=engine)  # bind the engine to a DB session
     Base.metadata.bind = engine  # bind the engine to the Base class metadata
 
-    # validate and use (if valid) the configuration file
-    conf_file_path = settings[CONF_FILE_ATTRIBUTE]
-    conf = validate_conf_file(conf_file_path)  # if None returned here, service is in unconfigured state
-    settings[CONF_OBJECT] = conf
-
-    settings[MALFORMED_FLAG] = False
     settings['influx_port'] = int(settings['influx_port'])  # the influx port setting must be converted to integer instead of a string
+    settings['kapacitor_port'] = int(settings['kapacitor_port'])  # the kapacitor port setting must be converted to integer instead of a string
 
     network_config_file_path = settings["network_configuration_path"]
     with open(network_config_file_path) as f:
@@ -54,22 +57,17 @@ def main(global_config, **settings):
 
     config = Configurator(settings=settings)
 
-    # add routes of the aggregator API
-    config.add_route('aggregator_config', '/aggregator/config')
-    config.add_route('aggregator_controller', '/aggregator/control')
-
     # add routes of the WHOAMI API
     config.add_route('whoami_endpoints', '/whoami/endpoints')
     config.add_route('whoami_endpoints_instance', 'whoami/endpoints/instance')
 
-    # add routes of the CONFIG API
-    config.add_route('config_sfc', '/config/sf-chains')
-    config.add_route('config_sfc_instance', '/config/sf-chains/instance')
-
     # add routes of the GRAPH API
     config.add_route('graph_build', '/graph/temporal')
     config.add_route('graph_manage', '/graph/temporal/{graph_id}')
     config.add_route('graph_algorithms_rtt', '/graph/temporal/{graph_id}/round-trip-time')
 
+    # add routes of the Alerts Configuration API
+    config.add_route('alerts_configuration', '/alerts')
+
     config.scan()  # This method scans the packages and finds any views related to the routes added in the app configuration
     return config.make_wsgi_app()
diff --git a/src/service/clmcservice/aggregation/__init__.py b/src/service/clmcservice/aggregation/__init__.py
deleted file mode 100644
index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/aggregation/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/src/service/clmcservice/aggregation/aggregator.py b/src/service/clmcservice/aggregation/aggregator.py
deleted file mode 100644
index 1199562d9b72240bdbeb9d9d29f0ee99b6a19f1c..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/aggregation/aggregator.py
+++ /dev/null
@@ -1,279 +0,0 @@
-#!/usr/bin/python3
-"""
-## © University of Southampton IT Innovation Centre, 2018
-##
-## Copyright in this software belongs to University of Southampton
-## IT Innovation Centre of Gamma House, Enterprise Road,
-## Chilworth Science Park, Southampton, SO16 7NS, UK.
-##
-## This software may not be used, sold, licensed, transferred, copied
-## or reproduced in whole or in part in any manner or form or in or
-## on any media by any person other than in accordance with the terms
-## of the Licence Agreement supplied with the software, or otherwise
-## without the prior written consent of the copyright owners.
-##
-## This software is distributed WITHOUT ANY WARRANTY, without even the
-## implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-## PURPOSE, except where stated in the Licence Agreement supplied with
-## the software.
-##
-##      Created By :            Nikolay Stanchev
-##      Created Date :          25-04-2018
-##      Created for Project :   FLAME
-"""
-
-from threading import Thread, Event
-from influxdb import InfluxDBClient
-from time import time, sleep
-from urllib.parse import urlparse
-from clmcservice.aggregationapi.utilities import generate_e2e_delay_report
-import getopt
-import logging
-
-
-class Aggregator(object):
-    """
-    A class used to perform the aggregation feature of the CLMC - aggregating network and media service measurements. Implemented as a separate process.
-    """
-
-    REPORT_PERIOD = 5  # default report period is 5s, that is every 5 seconds the mean delay values for the last 5 seconds are aggregated
-    DATABASE = 'CLMCMetrics'  # default database the aggregator uses
-    DATABASE_URL = 'http://172.40.231.51:8086'  # default database URL the aggregator uses
-
-    def __init__(self, database_name=DATABASE, database_url=DATABASE_URL, report_period=REPORT_PERIOD, logger=None):
-        """
-        Constructs an Aggregator instance.
-
-        :param database_name: database name to use
-        :param database_url: database url to use
-        :param report_period: the report period in seconds
-        """
-
-        if logger is None:
-            self.log = logging.getLogger(__name__)
-        else:
-            self.log = logger
-
-        self.log.info("Connecting to Influx database {0} with URL {1}".format(database_name, database_url))
-        # initialise a database client using the database url and the database name
-        url_object = urlparse(database_url)
-        self.db_client = InfluxDBClient(host=url_object.hostname, port=url_object.port, database=database_name, timeout=10)
-        self.log.info("Successfully connected to Influx database {0} with URL {1}".format(database_name, database_url))
-
-        self.db_url = database_url
-        self.db_name = database_name
-        self.report_period = report_period
-
-        # a cache-like dictionaries to store the last reported values, which can be used to fill in missing values
-        self.network_cache = {}
-        self.service_cache = {}
-
-        # a stop flag event object used to handle the stopping of the process
-        self._stop_flag = Event()
-
-    def stop(self):
-        """
-        Stop the aggregator from running.
-        """
-
-        self.log.info("Aggregator's stop flag has been set.")
-        self._stop_flag.set()
-
-    def run(self):
-        """
-        Performs the functionality of the aggregator - query data from both measurements merge that data and post it back in influx every 5 seconds.
-        """
-
-        self.log.info("Aggregator started running.")
-
-        current_time = int(time())
-        while not self._stop_flag.is_set():
-            self.log.info("Trying to generate an E2E measurement.")
-
-            boundary_time = current_time - self.report_period
-
-            boundary_time_nano = boundary_time * 1000000000
-            current_time_nano = current_time * 1000000000
-
-            # query the network delays and group them by path ID
-            network_delays = {}
-            result = self.db_client.query(
-                'SELECT mean(latency) as "net_latency", mean(bandwidth) as "net_bandwidth" FROM "{0}"."autogen"."network_delays" WHERE time >= {1} and time < {2} GROUP BY path, source, target'.format(
-                    self.db_name, boundary_time_nano, current_time_nano))
-
-            for item in result.items():
-                metadata, result_points = item
-                # measurement = metadata[0]
-                tags = metadata[1]
-
-                result_point = next(result_points)
-                network_delays[(tags['path'], tags['source'], tags['target'])] = result_point['net_latency'], result_point['net_bandwidth']
-                self.network_cache[(tags['path'], tags['source'], tags['target'])] = result_point['net_latency'], result_point['net_bandwidth']
-
-            # query the service delays and group them by endpoint, service function instance and sfr
-            service_delays = {}
-            result = self.db_client.query(
-                'SELECT mean(response_time) as "response_time", mean(request_size) as "request_size", mean(response_size) as "response_size" FROM "{0}"."autogen"."service_delays" WHERE time >= {1} and time < {2} GROUP BY endpoint, sf_instance, sfr'.format(
-                    self.db_name, boundary_time_nano, current_time_nano))
-
-            for item in result.items():
-                metadata, result_points = item
-                # measurement = metadata[0]
-                tags = metadata[1]
-                result_point = next(result_points)
-                service_delays[tags['sfr']] = (result_point['response_time'], result_point['request_size'], result_point['response_size'], tags['endpoint'], tags['sf_instance'])
-                self.service_cache[tags['sfr']] = (result_point['response_time'], result_point['request_size'], result_point['response_size'], tags['endpoint'], tags['sf_instance'])
-
-            # for each network path check if there is a media service delay report for the target sfr - if so, generate an e2e_delay measurement
-            for path in network_delays:
-                path_id, source, target = path
-
-                # check if we have a reverse path without a forward path for a potential aggregated row - e.g. SR3 to SR1 network row with service on SR3 and no row from SR1 to SR3
-                if (source in service_delays or source in self.service_cache) and (path_id, target, source) not in network_delays and (path_id, target, source) in self.network_cache:
-                    # hence search for the forward path in the cache
-                    forward_path = self.network_cache.get((path_id, target, source))
-                    reverse_path = network_delays.get((path_id, source, target))
-                    forward_delay = forward_path[0]
-                    avg_bandwidth = forward_path[1]
-                    reverse_delay = reverse_path[0]
-                    service_delay = service_delays.get(source, self.service_cache.get(source))
-                    response_time, request_size, response_size, endpoint, sf_instance = service_delay
-                    self.db_client.write_points(
-                        generate_e2e_delay_report(path_id, target, source, endpoint, sf_instance, forward_delay, reverse_delay, response_time,
-                                                  request_size, response_size, avg_bandwidth, boundary_time))
-
-                # check if target sfr is reported in service delays, in other words - if there is a media service instance being connected to target sfr
-                if target not in service_delays and target not in self.service_cache:
-                    # if not continue with the other network path reports
-                    continue
-
-                e2e_arguments = {"path_ID": None, "source_SFR": None, "target_SFR": None, "endpoint": None, "sf_instance": None, "delay_forward": None, "delay_reverse": None,
-                                 "delay_service": None, "avg_request_size": None, "avg_response_size": None, "avg_bandwidth": None, "time": boundary_time}
-
-                e2e_arguments['path_ID'] = path_id
-                e2e_arguments['source_SFR'] = source
-                e2e_arguments['target_SFR'] = target
-                e2e_arguments['delay_forward'] = network_delays[path][0]
-                e2e_arguments['avg_bandwidth'] = network_delays[path][1]
-
-                # reverse the path ID to get the network delay for the reversed path
-                reversed_path = (path_id, target, source)
-                if reversed_path in network_delays or reversed_path in self.network_cache:
-                    # get the reverse delay, use the latest value if reported or the cache value
-                    e2e_arguments['delay_reverse'] = network_delays.get(reversed_path, self.network_cache.get(reversed_path))[0]
-                else:
-                    e2e_arguments['delay_reverse'] = None
-
-                # get the response time of the media component connected to the target SFR
-                service_delay = service_delays.get(target, self.service_cache.get(target))
-                response_time, request_size, response_size, endpoint, sf_instance = service_delay
-                # put these points in the e2e arguments dictionary
-                e2e_arguments['delay_service'] = response_time
-                e2e_arguments['avg_request_size'] = request_size
-                e2e_arguments['avg_response_size'] = response_size
-                e2e_arguments['endpoint'] = endpoint
-                e2e_arguments['sf_instance'] = sf_instance
-
-                # if all the arguments of the e2e delay measurements were reported, then generate and post to Influx an E2E measurement row
-                if None not in e2e_arguments.values():
-                    self.db_client.write_points(
-                        generate_e2e_delay_report(e2e_arguments['path_ID'], e2e_arguments['source_SFR'], e2e_arguments['target_SFR'], e2e_arguments['endpoint'],
-                                                  e2e_arguments['sf_instance'], e2e_arguments['delay_forward'], e2e_arguments['delay_reverse'],
-                                                  e2e_arguments['delay_service'],
-                                                  e2e_arguments["avg_request_size"], e2e_arguments['avg_response_size'], e2e_arguments['avg_bandwidth'],
-                                                  e2e_arguments['time']))
-                    self.log.info("Successfully generated an E2E measurement and posted back to Influx.")
-                else:
-                    self.log.info("Couldn't generate an E2E measurement although some of the data could be fetched.")
-
-            old_timestamp = current_time
-            # wait until {report_period) seconds have passed
-            while current_time < old_timestamp + self.report_period:
-                sleep(1)
-                current_time = int(time())
-
-        self.log.info("Aggregator stopped running.")
-
-
-class AggregatorThread(Thread):
-    """
-    A utility class used to wrap around the Aggregator class and return a Thread instance, which can then be used for testing (provides start and stop methods)
-    """
-
-    REPORT_PERIOD = 5  # currently, report period is 5s, that is every 5 seconds the mean delay values for the last 5 seconds are aggregated
-    DATABASE = 'CLMCMetrics'  # default database the aggregator uses
-    DATABASE_URL = 'http://172.40.231.51:8086'  # default database URL the aggregator uses
-
-    def __init__(self, database=DATABASE, database_url=DATABASE_URL, report_period=REPORT_PERIOD):
-        """
-        Constructs an Aggregator instance.
-
-        :param database: database name to use
-        :param database_url: database url to use
-        """
-
-        super(AggregatorThread, self).__init__()  # call the constructor of the thread
-
-        self.aggregator = Aggregator(database_name=database, database_url=database_url, report_period=report_period)
-
-    def stop(self):
-        """
-        A method used to stop the thread.
-        """
-
-        self.aggregator.stop()
-
-    def run(self):
-        """
-        The method to execute when the thread starts.
-        """
-
-        self.aggregator.run()
-
-
-if __name__ == '__main__':
-    # initialise a file logger, only when module's main method is run (NOT when aggregator class is imported somewhere else)
-    log = logging.getLogger('aggregator')
-    hdlr = logging.FileHandler('/var/log/flame/clmc/aggregator.log', mode='a')
-    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
-    hdlr.setFormatter(formatter)
-    log.addHandler(hdlr)
-    log.setLevel(logging.DEBUG)
-
-    # log all errors that are thrown in the execution of the aggregator with the logger object initialized above
-    import sys
-    import traceback
-
-    def report_error(error_type, error_value, error_traceback, log_object=log):
-        log_object.error("Uncaught error thrown!")
-        log_object.error("Error type: {0}".format(error_type))
-        log_object.error("Error value: {0}".format(error_value))
-        log_object.debug("Error traceback:")
-        for trace in traceback.format_tb(error_traceback):
-            log_object.debug(trace)
-
-    sys.excepthook = report_error
-
-    # Parse command line options
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], "p:d:u:", ['period=', 'database=', 'url='])
-
-        arg_period = Aggregator.REPORT_PERIOD
-        arg_database_name = Aggregator.DATABASE
-        arg_database_url = Aggregator.DATABASE_URL
-
-        # Apply parameters if given
-        for opt, arg in opts:
-            if opt in ('-p', '--period'):
-                arg_period = int(arg)
-            elif opt in ('-d', '--database'):
-                arg_database_name = arg
-            elif opt in ('-u', '--url'):
-                arg_database_url = arg
-
-        Aggregator(database_name=arg_database_name, database_url=arg_database_url, report_period=arg_period, logger=log).run()
-
-    # log.info the error messages in case of a parse error
-    except getopt.GetoptError as err:
-        log.info(err)
-        log.info('Parse error; run the script using the following format: python aggregator.py -p <seconds> -d <database name> -u <database url>')
diff --git a/src/service/clmcservice/aggregation/influx_data_interface.py b/src/service/clmcservice/aggregation/influx_data_interface.py
deleted file mode 100644
index c6781d0c35d7d77c68932499591e9e67968c637d..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/aggregation/influx_data_interface.py
+++ /dev/null
@@ -1,241 +0,0 @@
-#!/usr/bin/python3
-"""
-## © University of Southampton IT Innovation Centre, 2018
-##
-## Copyright in this software belongs to University of Southampton
-## IT Innovation Centre of Gamma House, Enterprise Road,
-## Chilworth Science Park, Southampton, SO16 7NS, UK.
-##
-## This software may not be used, sold, licensed, transferred, copied
-## or reproduced in whole or in part in any manner or form or in or
-## on any media by any person other than in accordance with the terms
-## of the Licence Agreement supplied with the software, or otherwise
-## without the prior written consent of the copyright owners.
-##
-## This software is distributed WITHOUT ANY WARRANTY, without even the
-## implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-## PURPOSE, except where stated in the Licence Agreement supplied with
-## the software.
-##
-##      Created By :            Nikolay Stanchev
-##      Created Date :          04-06-2018
-##      Created for Project :   FLAME
-"""
-
-
-from clmcservice.aggregationapi.utilities import generate_e2e_delay_report
-
-"""
-A python module which provides auxiliary functions to mimic the behaviour of an InfluxDBClient when unit testing the aggregator.
-"""
-
-
-class MockResultSet(object):
-    """
-    A mock object used to mimic the behaviour of a ResultSet in the influx library (we only need the functionality of an object that collects
-    a group of points and has an items() method to get the collected points.
-    """
-
-    def __init__(self, points):
-        """
-        Initialise the mock result set.
-
-        :param points: the collected points
-        """
-
-        self.points = points
-
-    def items(self):
-        """
-        Get the data points in the result set.
-
-        :return: the collected data points
-        """
-
-        return self.points
-
-
-# The following are network-related auxiliary functions to generate test data.
-
-def _network_result_point(net_latency, net_bandwidth):
-    """
-    Returns a generator, which yields one data point representing a network measurement (fields only)
-
-    :param net_latency: the reported network latency
-    :param net_bandwidth: the reported network bandwidth.
-
-    :return: a generator object with one element (same behaviour is used in the influxdb library even when only one point is returned from the query)
-    """
-
-    yield {"net_latency": net_latency, "net_bandwidth": net_bandwidth}
-
-
-def _network_tags(path, source, target):
-    """
-    Returns a dictionary representing a network measurement (tags only)
-
-    :param path: the path identifier
-    :param source: the source service router
-    :param target: the target service router
-
-    :return: a dictionary with those values
-    """
-
-    return {"path": path, "source": source, "target": target}
-
-
-def _network_metadata(measurement, path, source, target):
-    """
-    Returns an influxdb-styled metadata about a network measurement.
-
-    :param measurement: the measurement table name
-    :param path: the path identifier
-    :param source: the source service router
-    :param target: the target service router
-
-    :return: a tuple with the first element being the measurement name and the second element being a dictionary with the network measurement tag values
-    """
-
-    return measurement, _network_tags(path, source, target)
-
-
-def network_result_item(measurement, path, source, target, net_latency, net_bandwidth):
-    """
-    Returns a full influxdb-styled network measurement item - with tag and field values.
-
-    :param measurement: the measurement table name
-    :param path: the path identifier
-    :param source: the source service router
-    :param target: the target service router
-    :param net_latency: the reported network latency
-    :param net_bandwidth: the reported network bandwidth.
-
-    :return: a tuple with the first element being the result metadata (measurement name and tags) and the second element being the data field points
-    """
-
-    return _network_metadata(measurement, path, source, target), _network_result_point(net_latency, net_bandwidth)
-
-
-# The following are service-related auxiliary functions to generate test data.
-
-def _service_result_point(response_time, request_size, response_size):
-    """
-    Returns a generator, which yields one data point representing a service measurement (fields only)
-
-    :param response_time: the response time of the service
-    :param request_size: the averaged request size of the service
-    :param response_size: the averaged response size of the service
-
-    :return: a generator object with one element (same behaviour is used in the influxdb library even when only one point is returned from the query)
-    """
-
-    yield {"response_time": response_time, "request_size": request_size, "response_size": response_size}
-
-
-def _service_tags(sfr, endpoint, sf_instance):
-    """
-    Returns a dictionary representing a service measurement (tags only)
-
-    :param sfr: the service router to which the service's endpoint is connected to
-    :param endpoint: the endpoint the service is being deployed on
-    :param sf_instance: the service function instance (FQDN)
-
-    :return: a dictionary with those values
-    """
-
-    return {"sfr": sfr,  "endpoint": endpoint, "sf_instance": sf_instance}
-
-
-def _service_metadata(measurement, sfr, endpoint, sf_instance):
-    """
-    Returns an influxdb-styled metadata about a service measurement.
-
-    :param measurement: the measurement table name
-    :param sfr: the service router to which the service's endpoint is connected to
-    :param endpoint: the endpoint the service is being deployed on
-    :param sf_instance: the service function instance (FQDN)
-
-    :return: a tuple with the first element being the measurement name and the second element being a dictionary with the service measurement tag values
-    """
-
-    return measurement, _service_tags(sfr, endpoint, sf_instance)
-
-
-def service_result_item(measurement, sfr, endpoint, sf_instance, response_time, request_size, response_size):
-    """
-    Returns a full influxdb-styled service measurement item - with tag and field values.
-
-    :param measurement: the measurement table name
-    :param sfr: the service router to which the service's endpoint is connected to
-    :param endpoint: the endpoint the service is being deployed on
-    :param sf_instance: the service function instance (FQDN)
-    :param response_time: the response time of the service
-    :param request_size: the averaged request size of the service
-    :param response_size: the averaged response size of the service
-
-    :return: a tuple with the first element being the result metadata (measurement name and tags) and the second element being the data field points
-    """
-
-    return _service_metadata(measurement, sfr, endpoint, sf_instance), _service_result_point(response_time, request_size, response_size)
-
-
-# The following are auxiliary functions for generating an e2e row used in the unit testing of the aggregator.
-
-def drop_timestamp(d):
-    """
-    Drops the time stamp from a dictionary-represented influx result item object
-
-    :param d: the dictionary object representing a measurement row from influx
-
-    :return: the same dictionary with no timestamp
-    """
-
-    d.pop('time')
-    return d
-
-
-def _generate_e2e_row(path_id, source_sfr, target_sfr, endpoint, sf_instance, delay_forward, delay_reverse, delay_service, avg_request_size, avg_response_size, avg_bandwidth):
-    """
-    Generates a combined averaged measurement about the e2e delay and its contributing parts with default timestamp (set as 0)
-
-    :param path_id: The path identifier, which is a bidirectional path ID for the request and the response path
-    :param source_sfr: source service router
-    :param target_sfr: target service router
-    :param endpoint: endpoint of the media component
-    :param sf_instance: service function instance (media component)
-    :param delay_forward: Path delay (Forward direction)
-    :param delay_reverse: Path delay (Reverse direction)
-    :param delay_service: the media service component response time
-    :param avg_request_size: averaged request size
-    :param avg_response_size: averaged response size
-    :param avg_bandwidth: averaged bandwidth
-
-    :return: a list of dict-formatted reports to post on influx
-    """
-
-    return generate_e2e_delay_report(path_id, source_sfr, target_sfr, endpoint, sf_instance, delay_forward,
-                                     delay_reverse, delay_service, avg_request_size, avg_response_size, avg_bandwidth, 0)[0]
-
-
-def generate_e2e_no_timestamp_row(path_id, source_sfr, target_sfr, endpoint, sf_instance, delay_forward, delay_reverse,
-                                  delay_service, avg_request_size, avg_response_size, avg_bandwidth):
-    """
-    Generates a combined averaged measurement about the e2e delay and its contributing parts (with no timestamp, used for testing)
-
-    :param path_id: The path identifier, which is a bidirectional path ID for the request and the response path
-    :param source_sfr: source service router
-    :param target_sfr: target service router
-    :param endpoint: endpoint of the media component
-    :param sf_instance: service function instance (media component)
-    :param delay_forward: Path delay (Forward direction)
-    :param delay_reverse: Path delay (Reverse direction)
-    :param delay_service: the media service component response time
-    :param avg_request_size: averaged request size
-    :param avg_response_size: averaged response size
-    :param avg_bandwidth: averaged bandwidth
-
-    :return: a list of dict-formatted reports to post on influx
-    """
-
-    return drop_timestamp(_generate_e2e_row(path_id, source_sfr, target_sfr, endpoint, sf_instance, delay_forward, delay_reverse, delay_service,
-                                             avg_request_size, avg_response_size, avg_bandwidth))
diff --git a/src/service/clmcservice/aggregation/test_aggregator.py b/src/service/clmcservice/aggregation/test_aggregator.py
deleted file mode 100644
index fd5befca9f9619b93415ab2205e42abe3994b2d6..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/aggregation/test_aggregator.py
+++ /dev/null
@@ -1,218 +0,0 @@
-#!/usr/bin/python3
-"""
-## © University of Southampton IT Innovation Centre, 2018
-##
-## Copyright in this software belongs to University of Southampton
-## IT Innovation Centre of Gamma House, Enterprise Road,
-## Chilworth Science Park, Southampton, SO16 7NS, UK.
-##
-## This software may not be used, sold, licensed, transferred, copied
-## or reproduced in whole or in part in any manner or form or in or
-## on any media by any person other than in accordance with the terms
-## of the Licence Agreement supplied with the software, or otherwise
-## without the prior written consent of the copyright owners.
-##
-## This software is distributed WITHOUT ANY WARRANTY, without even the
-## implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-## PURPOSE, except where stated in the Licence Agreement supplied with
-## the software.
-##
-##      Created By :            Nikolay Stanchev
-##      Created Date :          04-06-2018
-##      Created for Project :   FLAME
-"""
-
-
-from threading import Event
-from unittest import mock
-from clmcservice.aggregation.aggregator import AggregatorThread
-from clmcservice.aggregation.influx_data_interface import MockResultSet, network_result_item, service_result_item, drop_timestamp, generate_e2e_no_timestamp_row
-
-
-class TestAggregation(object):
-    """
-    A unit test to ensure the functionality of the aggregator is correct.
-    """
-
-    ACTUAL_RESULTS = "actual_aggregated_results"  # the attribute name of the actual results data structure
-    EXPECTED_RESULTS = "expected_aggregated_results"  # the attribute name of the expected results data structure
-    FINISHED = "finished_event"  # the attribute name of the flag object, which marks the end of the test
-
-    def points_generator(self, network_items, service_items):
-        """
-        A generator method intended to be used by the mock db client when involving the mocked query() method. It takes the network and service items, and generates a result from
-        those items each time query() is called by taking turns - starts with network result, followed by service result and then it repeats, until all items have been exhausted.
-        Network items and service items are expected to have the same length.
-
-        :param network_items: the network data to generate from
-        :param service_items: the service data to generate from
-
-        :return: a generator object
-        """
-
-        assert len(network_items) == len(service_items), "The data points generator must receive the same number of network items as the number of service items"
-        index = 0
-
-        while not getattr(self, self.FINISHED).is_set():
-            items = network_items[index]
-            yield MockResultSet(items)
-
-            items = service_items[index]
-
-            # before yielding the service data points, check if both sets of data points are enumerated
-            if index == len(network_items)-1:
-                # if so, set the finished flag of the test
-                getattr(self, self.FINISHED).set()
-
-            yield MockResultSet(items)
-
-            index += 1
-
-    def setup_mock_db_client(self, mock_class):
-        """
-        Sets up a mock db client and also defines the expected aggregation results from the test.
-
-        :param mock_class: the mock class used as an influx db client instance
-        :return:
-        """
-
-        setattr(self, self.ACTUAL_RESULTS, [])  # initially, there are no actual results, these are built progressively while the aggregator is running
-        setattr(self, self.EXPECTED_RESULTS, [
-            generate_e2e_no_timestamp_row(path_id="SR1-SR3", source_sfr="SR1", target_sfr="SR3", endpoint="endpoint3", sf_instance="ms1.flame.org", delay_forward=10,
-                                          delay_reverse=15, delay_service=10, avg_request_size=1024, avg_response_size=8, avg_bandwidth=104857600),
-            generate_e2e_no_timestamp_row(path_id="SR1-SR3", source_sfr="SR1", target_sfr="SR3", endpoint="endpoint3", sf_instance="ms1.flame.org", delay_forward=5,
-                                          delay_reverse=25, delay_service=40, avg_request_size=16, avg_response_size=2048, avg_bandwidth=104857600),
-            generate_e2e_no_timestamp_row(path_id="SR1-SR2", source_sfr="SR1", target_sfr="SR2", endpoint="endpoint2", sf_instance="ms2.flame.org", delay_forward=15,
-                                          delay_reverse=35, delay_service=60, avg_request_size=32, avg_response_size=1024, avg_bandwidth=104857600),
-            generate_e2e_no_timestamp_row(path_id="SR4-SR5", source_sfr="SR4", target_sfr="SR5", endpoint="endpoint5", sf_instance="ms5.flame.org", delay_forward=11,
-                                          delay_reverse=25, delay_service=50, avg_request_size=2048, avg_response_size=32, avg_bandwidth=104857600),
-            generate_e2e_no_timestamp_row(path_id="SR1-SR2", source_sfr="SR1", target_sfr="SR2", endpoint="endpoint2", sf_instance="ms2.flame.org", delay_forward=12,
-                                          delay_reverse=5, delay_service=60, avg_request_size=32, avg_response_size=1024, avg_bandwidth=104857600),
-            generate_e2e_no_timestamp_row(path_id="SR1-SR3", source_sfr="SR1", target_sfr="SR3", endpoint="endpoint3", sf_instance="ms1.flame.org", delay_forward=16,
-                                          delay_reverse=25, delay_service=40, avg_request_size=16, avg_response_size=2048, avg_bandwidth=104857600),
-            generate_e2e_no_timestamp_row(path_id="SR10-SR12", source_sfr="SR12", target_sfr="SR10", endpoint="endpoint10", sf_instance="ms4.flame.org", delay_forward=22,
-                                          delay_reverse=3, delay_service=75, avg_request_size=1024, avg_response_size=64, avg_bandwidth=104857600),
-            generate_e2e_no_timestamp_row(path_id="SR14-SR15", source_sfr="SR14", target_sfr="SR15", endpoint="endpoint15", sf_instance="ms2.flame.org", delay_forward=24,
-                                          delay_reverse=27, delay_service=105, avg_request_size=1024, avg_response_size=128, avg_bandwidth=104857600),
-            generate_e2e_no_timestamp_row(path_id="SR14-SR15", source_sfr="SR15", target_sfr="SR14", endpoint="endpoint14", sf_instance="ms1.flame.org", delay_forward=27,
-                                          delay_reverse=24, delay_service=85, avg_request_size=32, avg_response_size=64, avg_bandwidth=104857600),
-            generate_e2e_no_timestamp_row(path_id="SR8-SR18", source_sfr="SR18", target_sfr="SR8", endpoint="endpoint8", sf_instance="ms2.flame.org", delay_forward=18,
-                                          delay_reverse=19, delay_service=75, avg_request_size=2048, avg_response_size=16, avg_bandwidth=104857600),
-        ])  # defines the expected rows from the aggregation
-        setattr(self, self.FINISHED, Event())
-
-        # initialises the influx data generator, which is invoked each time the query() method of the mock db client is called
-        mock_points = self.points_generator(
-            # network items is a list of tuples, each tuple representing a result from a query; each time query() is called and a network measurement must be generated, one of
-            # these tuples is yielded; an empty tuple means a result with no points
-            network_items=[
-                (
-                    network_result_item(measurement="network_delays", path="SR1-SR3", source="SR1", target="SR3", net_latency=10, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR1-SR3", source="SR3", target="SR1", net_latency=15, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR1-SR33", source="SR1", target="SR33", net_latency=15, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR2-SR11", source="SR11", target="SR2", net_latency=15, net_bandwidth=104857600),
-                ),
-                (
-                    network_result_item(measurement="network_delays", path="SR1-SR3", source="SR1", target="SR3", net_latency=5, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR1-SR3", source="SR3", target="SR1", net_latency=25, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR1-SR2", source="SR1", target="SR2", net_latency=15, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR1-SR2", source="SR2", target="SR1", net_latency=35, net_bandwidth=104857600),
-                ),
-                (
-                    network_result_item(measurement="network_delays", path="SR4-SR5", source="SR4", target="SR5", net_latency=5, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR4-SR5", source="SR5", target="SR4", net_latency=25, net_bandwidth=104857600),
-                ),
-                (),
-                (
-                    network_result_item(measurement="network_delays", path="SR4-SR5", source="SR4", target="SR5", net_latency=5, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR4-SR5", source="SR5", target="SR4", net_latency=25, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR0-SR1", source="SR0", target="SR1", net_latency=5, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR0-SR1", source="SR1", target="SR0", net_latency=25, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR10-SR12", source="SR10", target="SR12", net_latency=11, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR10-SR12", source="SR12", target="SR10", net_latency=22, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR14-SR15", source="SR14", target="SR15", net_latency=24, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR14-SR15", source="SR15", target="SR14", net_latency=26, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR8-SR18", source="SR18", target="SR8", net_latency=18, net_bandwidth=104857600),
-                ),
-                (
-                    network_result_item(measurement="network_delays", path="SR4-SR5", source="SR4", target="SR5", net_latency=11, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR1-SR2", source="SR1", target="SR2", net_latency=12, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR1-SR2", source="SR2", target="SR1", net_latency=5, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR1-SR3", source="SR1", target="SR3", net_latency=16, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR10-SR12", source="SR10", target="SR12", net_latency=3, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR14-SR15", source="SR15", target="SR14", net_latency=27, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR16-SR17", source="SR16", target="SR17", net_latency=27, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR8-SR18", source="SR8", target="SR18", net_latency=19, net_bandwidth=104857600),
-                )
-            ],
-            # service items is a list of tuples, each tuple representing a result from a query; each time query() is called and a service measurement must be generated, one of
-            # these tuples is yielded; an empty tuple means a result with no points
-            service_items=[
-                (
-                    service_result_item(measurement="service_delays", sfr="SR3", endpoint="endpoint3", sf_instance="ms1.flame.org", response_time=10, request_size=1024, response_size=8),
-                    service_result_item(measurement="service_delays", sfr="SR33", endpoint="endpoint33", sf_instance="ms2.flame.org", response_time=20, request_size=4096, response_size=8),
-                    service_result_item(measurement="service_delays", sfr="SR11", endpoint="endpoint11", sf_instance="ms3.flame.org", response_time=30, request_size=1024, response_size=8),
-                ),
-                (
-                    service_result_item(measurement="service_delays", sfr="SR3", endpoint="endpoint3", sf_instance="ms1.flame.org", response_time=40, request_size=16, response_size=2048),
-                    service_result_item(measurement="service_delays", sfr="SR2", endpoint="endpoint2", sf_instance="ms2.flame.org", response_time=60, request_size=32, response_size=1024),
-                ),
-                (
-                    service_result_item(measurement="service_delays", sfr="SR6", endpoint="endpoint6", sf_instance="ms1.flame.org", response_time=60, request_size=1024, response_size=8),
-                    service_result_item(measurement="service_delays", sfr="SR7", endpoint="endpoint7", sf_instance="ms1.flame.org", response_time=70, request_size=1024, response_size=8),
-                ),
-                (
-                    service_result_item(measurement="service_delays", sfr="SR6", endpoint="endpoint6", sf_instance="ms1.flame.org", response_time=65, request_size=2048, response_size=16),
-                    service_result_item(measurement="service_delays", sfr="SR8", endpoint="endpoint8", sf_instance="ms2.flame.org", response_time=75, request_size=2048, response_size=16),
-                    service_result_item(measurement="service_delays", sfr="SR9", endpoint="endpoint9", sf_instance="ms3.flame.org", response_time=25, request_size=2048, response_size=16),
-                ),
-                (),
-                (
-                    service_result_item(measurement="service_delays", sfr="SR5", endpoint="endpoint5", sf_instance="ms5.flame.org", response_time=50, request_size=2048, response_size=32),
-                    service_result_item(measurement="service_delays", sfr="SR10", endpoint="endpoint10", sf_instance="ms4.flame.org", response_time=75, request_size=1024, response_size=64),
-                    service_result_item(measurement="service_delays", sfr="SR15", endpoint="endpoint15", sf_instance="ms2.flame.org", response_time=105, request_size=1024, response_size=128),
-                    service_result_item(measurement="service_delays", sfr="SR14", endpoint="endpoint14", sf_instance="ms1.flame.org", response_time=85, request_size=32, response_size=64),
-                    service_result_item(measurement="service_delays", sfr="SR16", endpoint="endpoint16", sf_instance="ms1.flame.org", response_time=85, request_size=32, response_size=64),
-                )
-            ]
-        )
-
-        # implement the query() and write_points() methods of the mock db client
-        mock_class.query = lambda query: next(mock_points)  # query() returns the next element of the mock_points generator
-        mock_class.write_points = lambda points: getattr(self, self.ACTUAL_RESULTS).append(drop_timestamp(points[0]))  # write_points() adds aggregated rows to actual results list
-
-        # at the end of the test, we can compare the expected results with the actual results that were generated during the aggregation process
-
-    @mock.patch('clmcservice.aggregation.aggregator.InfluxDBClient', autospec=True)
-    def test_aggregator(self, MockDBClient):
-        """
-        The actual test that's executed when running pytest.
-
-        :param MockDBClient: a mock object argument passed by the mock.patch decorator. The decorator replaces all occurrences of InfluxDBClient in the aggregator's code with
-        this MockDBClient mock, so instantiating the client yields MockDBClient.return_value
-        """
-
-        # set up the mock db client by providing implementations for the necessary methods (query and write_points)
-        self.setup_mock_db_client(MockDBClient.return_value)
-
-        # start the aggregator as a thread, with the report period set to 2 so that the unit test does not take too long
-        t = AggregatorThread(report_period=2)
-        t.start()
-
-        # wait until the finished flag has been set
-        getattr(self, self.FINISHED).wait()
-
-        # stop the thread when the aggregation has finished
-        t.stop()
-
-        # compare the expected results with the actual results that were collected during the aggregation process
-        expected_results = getattr(self, self.EXPECTED_RESULTS)
-        actual_results = getattr(self, self.ACTUAL_RESULTS)
-        assert type(actual_results) is list
-        assert type(expected_results) is list
-        assert len(actual_results) == len(expected_results), "Actual and expected results differ in length."
-
-        # we compare sorted versions of the expected and actual results; this is because the aggregator implementation uses a dictionary for efficiency purposes, hence the order of
-        # the collected results may vary, especially on different operating systems; we only care that the two lists of results contain the same elements
-        assert sorted(actual_results, key=lambda k: k['tags']['path_ID']) == sorted(expected_results, key=lambda k: k['tags']['path_ID']), \
-            "Test failure - aggregation process returns incorrect results."
diff --git a/src/service/clmcservice/aggregationapi/__init__.py b/src/service/clmcservice/aggregationapi/__init__.py
deleted file mode 100644
index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/aggregationapi/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/src/service/clmcservice/aggregationapi/tests.py b/src/service/clmcservice/aggregationapi/tests.py
deleted file mode 100644
index 086f3d378bc500f590ee386c6a50b246b04dac37..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/aggregationapi/tests.py
+++ /dev/null
@@ -1,548 +0,0 @@
-#!/usr/bin/python3
-"""
-// © University of Southampton IT Innovation Centre, 2018
-//
-// Copyright in this software belongs to University of Southampton
-// IT Innovation Centre of Gamma House, Enterprise Road,
-// Chilworth Science Park, Southampton, SO16 7NS, UK.
-//
-// This software may not be used, sold, licensed, transferred, copied
-// or reproduced in whole or in part in any manner or form or in or
-// on any media by any person other than in accordance with the terms
-// of the Licence Agreement supplied with the software, or otherwise
-// without the prior written consent of the copyright owners.
-//
-// This software is distributed WITHOUT ANY WARRANTY, without even the
-// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-// PURPOSE, except where stated in the Licence Agreement supplied with
-// the software.
-//
-//      Created By :            Nikolay Stanchev
-//      Created Date :          15-05-2018
-//      Created for Project :   FLAME
-"""
-
-from pyramid import testing
-from pyramid.httpexceptions import HTTPBadRequest
-from time import sleep
-from clmcservice.aggregationapi.utilities import CONF_FILE_ATTRIBUTE, CONF_OBJECT, AGGREGATOR_CONFIG_SECTION, CONFIG_ATTRIBUTES, PROCESS_ATTRIBUTE, RUNNING_FLAG, MALFORMED_FLAG, URL_REGEX
-import pytest
-import os
-import signal
-import configparser
-
-
-class TestAggregatorAPI(object):
-    """
-    A pytest-based test suite for the aggregator API calls
-    """
-
-    @pytest.fixture(autouse=True)
-    def app_config(self):
-        """
-        A fixture to implement setUp/tearDown functionality for all tests by initializing the configuration structure for the web service
-        """
-
-        self.registry = testing.setUp()
-        config = configparser.ConfigParser()
-        config[AGGREGATOR_CONFIG_SECTION] = {'aggregator_report_period': 5, 'aggregator_database_name': 'CLMCMetrics', 'aggregator_database_url': "http://172.40.231.51:8086"}
-        self.registry.add_settings({'configuration_object': config, 'aggregator_running': False, 'malformed': False, 'configuration_file_path': "/etc/flame/clmc/service.conf"})
-
-        yield
-
-        testing.tearDown()
-
-    def test_GET_config(self):
-        """
-        Tests the GET method for the configuration of the aggregator.
-        """
-
-        from clmcservice.aggregationapi.views import AggregatorConfig  # nested import so that importing the class view is part of the test itself
-
-        setup_config = self.registry.get_settings()[CONF_OBJECT]
-
-        # test an error is thrown when aggregator is in unconfigured state
-        self.registry.get_settings()[CONF_OBJECT] = None
-        request = testing.DummyRequest()
-        error_raised = False
-        try:
-            AggregatorConfig(request).get()
-        except HTTPBadRequest:
-            error_raised = True
-        assert error_raised, "Error must be raised in case of a not configured aggregator."
-        self.registry.get_settings()[CONF_OBJECT] = setup_config
-
-        # test GET method when aggregator is configured
-        assert int(self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_report_period')) == 5, "Initial report period is 5 seconds."
-        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_name') == 'CLMCMetrics', "Initial database name the aggregator uses is CLMCMetrics."
-        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_url') == "http://172.40.231.51:8086", "Initial aggregator url is http://172.40.231.51:8086"
-
-        request = testing.DummyRequest()
-        response = AggregatorConfig(request).get()
-
-        assert response == {'aggregator_report_period': 5,
-                            'aggregator_database_name': 'CLMCMetrics',
-                            'aggregator_database_url': "http://172.40.231.51:8086"}, "Response must be a dictionary representing a JSON object with the correct configuration data of the aggregator."
-
-        assert int(self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_report_period')) == 5, "A GET request must not modify the aggregator configuration data."
-        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_name') == 'CLMCMetrics', "A GET request must not modify the aggregator configuration data."
-        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_url') == "http://172.40.231.51:8086", "A GET request must not modify the aggregator configuration data."
-
-    @pytest.mark.parametrize("input_body, output_value", [
-        ('{"aggregator_report_period": 10, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://171.40.231.51:8086"}',
-         {'aggregator_report_period': 10, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "http://171.40.231.51:8086"}),
-        ('{"aggregator_report_period": 15, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://172.50.231.51:8086"}',
-         {'aggregator_report_period': 15, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "http://172.50.231.51:8086"}),
-        ('{"aggregator_report_period": 20, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://172.60.231.51:8086"}',
-         {'aggregator_report_period': 20, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "http://172.60.231.51:8086"}),
-        ('{"aggregator_report_period": 25, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://172.60.231.51:8086"}',
-         {'aggregator_report_period': 25, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "http://172.60.231.51:8086"}),
-        ('{"aggregator_report_period": 200, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "https://172.50.231.51:8086"}',
-         {'aggregator_report_period': 200, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "https://172.50.231.51:8086"}),
-        ('{"aggregator_report_period": 150, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "https://localhost:8086"}',
-         {'aggregator_report_period': 150, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "https://localhost:8086"}),
-        ("{aggregator_report_period: 2hb5, aggregator_database_name: CLMCMetrics, aggregator_database_url: http://172.60.231.51:8086}", None),
-        ("{aggregator_report_period: 250-, aggregator_database_name: CLMCMetrics, aggregator_database_url: http://172.60.231.52:8086}", None),
-        ("{aggregator_report_period: 25, aggregator_database_name: CLMCMetrics, aggregator_database_url: ftp://172.60.231.51:8086}", None),
-        ("{aggregator_report_period: 25, aggregator_database_name: CLMCMetrics, aggregator_database_url: http://172.60.231.51:8086/query param}", None),
-        ("{aggregator_report_period: 250, aggregator_database_name: CLMCMetrics, aggregator_database_url: http://172.60.231.52:808686}", None),
-        ("{}", None),
-        ("{aggregator_running: true}", None),
-    ])
-    def test_PUT_config(self, input_body, output_value):
-        """
-        Tests the PUT method for the configuration of the aggregator
-        :param input_body: the input body parameter
-        :param output_value: the expected output value, None for expecting an Exception
-        """
-
-        from clmcservice.aggregationapi.views import AggregatorConfig, AggregatorController  # nested import so that importing the class view is part of the test itself
-
-        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
-        assert int(self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_report_period')) == 5, "Initial report period is 5 seconds."
-        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_name') == 'CLMCMetrics', "Initial database name the aggregator uses is CLMCMetrics."
-        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_url') == "http://172.40.231.51:8086", "Initial aggregator url is http://172.40.231.51:8086"
-
-        request = testing.DummyRequest()
-        request.body = input_body.encode(request.charset)
-
-        if output_value is not None:
-            response = AggregatorConfig(request).put()
-            assert response == output_value, "Response of PUT request must include the new configuration of the aggregator"
-
-            for attribute in CONFIG_ATTRIBUTES:
-                assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION][attribute] == str(output_value[attribute]), "Aggregator settings configuration is not updated."
-
-            assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Aggregator running status should not be updated after a configuration update."
-
-            # assert that the conf file is updated
-            updated_conf = configparser.ConfigParser()
-            conf_file = self.registry.get_settings().get(CONF_FILE_ATTRIBUTE)
-            assert updated_conf.read(conf_file) == [conf_file]
-            assert AGGREGATOR_CONFIG_SECTION in updated_conf.sections()
-
-            for attribute in CONFIG_ATTRIBUTES:
-                assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION][attribute] == updated_conf[AGGREGATOR_CONFIG_SECTION][attribute], "Aggregator settings configuration is not updated."
-
-        else:
-            error_raised = False
-            try:
-                AggregatorConfig(request).put()
-            except HTTPBadRequest:
-                error_raised = True
-
-            assert error_raised, "Error must be raised in case of an invalid argument."
-
-    def test_start(self):
-        """
-        Tests starting the aggregator through an API call.
-        """
-
-        from clmcservice.aggregationapi.views import AggregatorController  # nested import so that importing the class view is part of the test itself
-
-        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
-
-        # test starting the aggregator when in unconfigured state
-        setup_config = self.registry.get_settings()[CONF_OBJECT]
-        self.registry.get_settings()[CONF_OBJECT] = None
-        request = testing.DummyRequest()
-        input_body = '{"action": "start"}'
-        request.body = input_body.encode(request.charset)
-        error_raised = False
-        try:
-            AggregatorController(request).put()
-        except HTTPBadRequest:
-            error_raised = True
-        assert error_raised, "Error must be raised in case of a not configured aggregator."
-        self.registry.get_settings()[CONF_OBJECT] = setup_config
-
-        # test starting the aggregation when in configured state
-        request = testing.DummyRequest()
-        input_body = '{"action": "start"}'
-        request.body = input_body.encode(request.charset)
-
-        response = AggregatorController(request).put()
-        assert response == {RUNNING_FLAG: True}, "The aggregator should have been started."
-        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been started."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is not None, "Aggregator process should have been initialized."
-
-        # kill the started process after the test is over
-        pid = request.registry.settings[PROCESS_ATTRIBUTE].pid
-        os.kill(pid, signal.SIGTERM)
-
-    def test_stop(self):
-        """
-        Tests stopping the aggregator through an API call.
-        """
-
-        from clmcservice.aggregationapi.views import AggregatorController  # nested import so that importing the class view is part of the test itself
-
-        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
-
-        # test stopping the aggregator when in unconfigured state
-        setup_config = self.registry.get_settings()[CONF_OBJECT]
-        self.registry.get_settings()[CONF_OBJECT] = None
-        request = testing.DummyRequest()
-        input_body = '{"action": "stop"}'
-        request.body = input_body.encode(request.charset)
-        error_raised = False
-        try:
-            AggregatorController(request).put()
-        except HTTPBadRequest:
-            error_raised = True
-        assert error_raised, "Error must be raised in case of a not configured aggregator."
-        self.registry.get_settings()[CONF_OBJECT] = setup_config
-
-        # test stopping the aggregation when in configured state
-        # send a start request to trigger the aggregator
-        request = testing.DummyRequest()
-        input_body = '{"action": "start"}'
-        request.body = input_body.encode(request.charset)
-        AggregatorController(request).put()
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is not None, "Aggregator process should have been initialized."
-        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Aggregator process should have been initialized."
-
-        # test stopping the aggregator process when it is running
-        request = testing.DummyRequest()
-        input_body = '{"action": "stop"}'
-        request.body = input_body.encode(request.charset)
-
-        response = AggregatorController(request).put()
-        assert response == {RUNNING_FLAG: False}, "The aggregator should have been stopped."
-        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been stopped."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Aggregator process should have been terminated."
-
-        sleep(2)  # put a 2 seconds timeout so that the aggregator process can terminate
-
-        # test stopping the aggregator process when it is not running
-        request = testing.DummyRequest()
-        input_body = '{"action": "stop"}'
-        request.body = input_body.encode(request.charset)
-
-        response = AggregatorController(request).put()
-        assert response == {RUNNING_FLAG: False}, "The aggregator should have been stopped."
-        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been stopped."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Aggregator process should have been terminated."
-
-    def test_restart(self):
-        """
-        Tests restarting the aggregator through an API call.
-        """
-
-        from clmcservice.aggregationapi.views import AggregatorController  # nested import so that importing the class view is part of the test itself
-
-        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
-
-        # test restarting the aggregator when in unconfigured state
-        setup_config = self.registry.get_settings()[CONF_OBJECT]
-        self.registry.get_settings()[CONF_OBJECT] = None
-        request = testing.DummyRequest()
-        input_body = '{"action": "restart"}'
-        request.body = input_body.encode(request.charset)
-        error_raised = False
-        try:
-            AggregatorController(request).put()
-        except HTTPBadRequest:
-            error_raised = True
-        assert error_raised, "Error must be raised in case of a not configured aggregator."
-        self.registry.get_settings()[CONF_OBJECT] = setup_config
-
-        # test restarting the aggregation when in configured state
-        # test restarting the aggregator process when it is stopped
-        request = testing.DummyRequest()
-        input_body = '{"action": "restart"}'
-        request.body = input_body.encode(request.charset)
-
-        response = AggregatorController(request).put()
-        assert response == {RUNNING_FLAG: True}, "The aggregator should have been restarted."
-        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been restarted."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE), "The aggregator process should have been reinitialised."
-
-        # test restarting the aggregator process when it is running
-        request = testing.DummyRequest()
-        input_body = '{"action": "restart"}'
-        request.body = input_body.encode(request.charset)
-
-        response = AggregatorController(request).put()
-        assert response == {RUNNING_FLAG: True}, "The aggregator should have been restarted."
-        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been restarted."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE), "The aggregator process should have been reinitialised."
-
-        # kill the started process after the test is over
-        pid = request.registry.settings[PROCESS_ATTRIBUTE].pid
-        os.kill(pid, signal.SIGTERM)
-
-    @pytest.mark.parametrize("input_body", [
-        '{"action": "malformed"}',
-        '{"action": true}',
-        '{"action": false}',
-        '{"action": 1}',
-        '{invalid-json}',
-        '{"action": "start", "unneeded_argument": false}',
-        '{}'
-    ])
-    def test_malformed_actions(self, input_body):
-        """
-        Tests sending a malformed type of action to the aggregator through an API call.
-        """
-
-        from clmcservice.aggregationapi.views import AggregatorController  # nested import so that importing the class view is part of the test itself
-
-        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
-
-        # send the malformed request body to the controller
-        request = testing.DummyRequest()
-        request.body = input_body.encode(request.charset)
-
-        error_raised = False
-        try:
-            AggregatorController(request).put()
-        except HTTPBadRequest:
-            error_raised = True
-
-        assert error_raised
-
-    def test_GET_status(self):
-        """
-        Tests the GET method for the status of the aggregator.
-        """
-
-        from clmcservice.aggregationapi.views import AggregatorController  # nested import so that importing the class view is part of the test itself
-
-        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
-
-        request = testing.DummyRequest()
-        response = AggregatorController(request).get()
-
-        assert response == {'aggregator_running': False}, "Response must be a dictionary representing a JSON object with the correct status data of the aggregator."
-
-        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "A GET request must not modify the aggregator status flag."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "A GET request must not start the aggregator process."
-
-        # test status with malformed configuration
-        # start the aggregator
-        request = testing.DummyRequest()
-        input_body = '{"action": "start"}'
-        request.body = input_body.encode(request.charset)
-        AggregatorController(request).put()
-        self.registry.get_settings()[MALFORMED_FLAG] = True
-
-        request = testing.DummyRequest()
-        response = AggregatorController(request).get()
-
-        assert response == {'aggregator_running': True,
-                            'malformed': True,
-                            'comment': 'Aggregator is running in a malformed state - it uses an old version of the configuration. Please, restart it so that the updated configuration is used.'}, \
-            "Response must be a dictionary representing a JSON object with the correct status data of the aggregator."
-
-        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "A GET request must not modify the aggregator status flag."
-        assert self.registry.get_settings().get(MALFORMED_FLAG), "A GET request must not modify the aggregator malformed flag."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is not None, "A GET request must not stop the aggregator process."
-
-        # kill the started process after the test is over
-        pid = request.registry.settings[PROCESS_ATTRIBUTE].pid
-        os.kill(pid, signal.SIGTERM)
-
-    def test_malformed_flag_behaviour(self):
-        """
-        Tests the behaviour of the malformed configuration flag of the aggregator when doing a sequence of API calls.
-        """
-
-        from clmcservice.aggregationapi.views import AggregatorController, AggregatorConfig  # nested import so that importing the class view is part of the test itself
-
-        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
-        assert not self.registry.get_settings().get(MALFORMED_FLAG), "Initially aggregator is not in a malformed state"
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
-        assert int(self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_report_period')) == 5, "Initial report period is 5 seconds."
-        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_name') == 'CLMCMetrics', "Initial database name the aggregator uses is CLMCMetrics."
-        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_url') == "http://172.40.231.51:8086", "Initial aggregator url is http://172.40.231.51:8086"
-
-        # start the aggregator with the default configuration
-        request = testing.DummyRequest()
-        input_body = '{"action": "start"}'
-        request.body = input_body.encode(request.charset)
-
-        response = AggregatorController(request).put()
-        assert response == {RUNNING_FLAG: True}, "The aggregator should have been started."
-
-        # update the configuration of the aggregator while it is running
-        config_body = '{"aggregator_report_period": 15, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://172.50.231.51:8086"}'
-        output_body = {'aggregator_report_period': 15, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "http://172.50.231.51:8086", 'malformed': True,
-                       'comment': 'Aggregator is running in a malformed state - it uses an old version of the configuration. Please, restart it so that the updated configuration is used.'}
-        request = testing.DummyRequest()
-        request.body = config_body.encode(request.charset)
-        response = AggregatorConfig(request).put()
-        assert response == output_body, "Response of PUT request must include the new configuration of the aggregator"
-
-        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator shouldn't be stopped when the configuration is updated."
-        assert self.registry.get_settings().get(MALFORMED_FLAG), "The malformed flag should be set when the configuration is updated while the process is running."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is not None, "The aggregator shouldn't be stopped when the configuration is updated."
-
-        # check that the malformed flag has been updated through a GET call
-        request = testing.DummyRequest()
-        response = AggregatorController(request).get()
-        assert response == {'aggregator_running': True,
-                            'malformed': True,
-                            'comment': 'Aggregator is running in a malformed state - it uses an old version of the configuration. Please, restart it so that the updated configuration is used.'}, \
-            "Response must be a dictionary representing a JSON object with the correct status data of the aggregator."
-
-        # restart the aggregator with the new configuration
-        request = testing.DummyRequest()
-        input_body = '{"action": "restart"}'
-        request.body = input_body.encode(request.charset)
-        response = AggregatorController(request).put()
-        assert response == {RUNNING_FLAG: True}, "The aggregator should have been restarted."
-        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been restarted."
-        assert not self.registry.get_settings().get(MALFORMED_FLAG), "The malformed flag should have been reset to False."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is not None, "The aggregator should have been restarted."
-
-        # update the configuration again while the aggregator is running
-        config_body = '{"aggregator_report_period": 30, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://172.50.231.51:8086"}'
-        output_body = {'aggregator_report_period': 30, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "http://172.50.231.51:8086", 'malformed': True,
-                       'comment': 'Aggregator is running in a malformed state - it uses an old version of the configuration. Please, restart it so that the updated configuration is used.'}
-        request = testing.DummyRequest()
-        request.body = config_body.encode(request.charset)
-        response = AggregatorConfig(request).put()
-        assert response == output_body, "Response of PUT request must include the new configuration of the aggregator"
-
-        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator shouldn't be stopped when the configuration is updated."
-        assert self.registry.get_settings().get(MALFORMED_FLAG), "The malformed flag should be set when the configuration is updated while the process is running."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is not None, "The aggregator shouldn't be stopped when the configuration is updated."
-
-        # stop the aggregator - this should also reset the malformed status flag
-        request = testing.DummyRequest()
-        input_body = '{"action": "stop"}'
-        request.body = input_body.encode(request.charset)
-        response = AggregatorController(request).put()
-        assert response == {RUNNING_FLAG: False}, "The aggregator should have been stopped."
-        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been stopped."
-        assert not self.registry.get_settings().get(MALFORMED_FLAG), "The malformed flag should have been reset to False."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "The aggregator should have been stopped."
-
-    def test_unconfigured_state(self):
-        """
-        Tests the behaviour of the service when in unconfigured state.
-        """
-
-        from clmcservice.aggregationapi.views import AggregatorConfig, AggregatorController
-
-        self.registry.get_settings()[CONF_OBJECT] = None  # unconfigured state - conf object is None
-
-        # when doing a GET for the configuration we expect a bad request if the service is in unconfigured state
-        bad_request = False
-        bad_request_msg = None
-        try:
-            request = testing.DummyRequest()
-            AggregatorConfig(request).get()
-        except HTTPBadRequest as err:
-            bad_request = True
-            bad_request_msg = err.message
-
-        assert bad_request
-        assert bad_request_msg == "Aggregator has not been configured, yet. Send a PUT request to /aggregator/config with a JSON body of the configuration."
-
-        # when doing a PUT for the aggregator to start/stop/restart we expect a bad request if the service is in unconfigured state
-        for action in ('start', 'stop', 'restart'):
-            bad_request = False
-            bad_request_msg = None
-            try:
-                request = testing.DummyRequest()
-                request.body = ('{"action": "' + action + '"}').encode(request.charset)
-                AggregatorController(request).put()
-            except HTTPBadRequest as err:
-                bad_request = True
-                bad_request_msg = err.message
-
-            assert bad_request
-            assert bad_request_msg == "You must configure the aggregator before controlling it. Send a PUT request to /aggregator/config with a JSON body of the configuration."
-
-        # configure the aggregator
-        input_body = '{"aggregator_report_period": 10, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://171.40.231.51:8086"}'
-        output_body = {'aggregator_report_period': 10, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "http://171.40.231.51:8086"}
-        request = testing.DummyRequest()
-        request.body = input_body.encode(request.charset)
-        response = AggregatorConfig(request).put()
-        assert response == output_body
-
-        request = testing.DummyRequest()
-        assert AggregatorConfig(request).get() == output_body
-
-
-class TestRegexURL(object):
-    """
-    A pytest-based test for the regular expression the service uses to validate the database URL
-    """
-
-    @pytest.mark.parametrize("valid_url", [
-        "http://localhost:8080/",
-        "https://localhost:80/url/path",
-        "https://192.168.20.20/?query=param",
-        "http://custom.domain.com",
-        "http://domain.net:8888/",
-        "https://10.160.150.4:21",
-        "http://localhost:12345",
-        "http://domain.com:21/path",
-        "http://domain.com:32?path",
-        "http://domain.com:43#path"
-    ])
-    def test_valid_urls(self, valid_url):
-        """
-        Tests that the regular expression can detect valid URLs.
-
-        :param valid_url: a string representing a valid URL
-        """
-
-        matched_object = URL_REGEX.match(valid_url)
-
-        assert matched_object is not None, "The regular expression fails in validating a correct URL."
-
-        assert matched_object.group() is not None, "The matched object should return the full-match string"
-
-    @pytest.mark.parametrize("invalid_url", [
-        "ftp://localhost:80/url/path",
-        "tcp://192.168.20.20/?query=param",
-        "http:/localhost:80/",
-        "https//localhost:8080/",
-        "https://domain:1234/url/path",
-        "http://domain.com:808080/",
-        "http://localhost:8-080/",
-        "http://localhost:port80/",
-        "http://domain.com:8080url/path",
-        "http://domain.com:8080/?url path",
-    ])
-    def test_invalid_urls(self, invalid_url):
-        """
-        Tests that the regular expression can detect invalid URLs.
-
-        :param invalid_url: a string representing an invalid URL
-        """
-
-        matched_object = URL_REGEX.match(invalid_url)
-
-        assert matched_object is None, "The regular expression fails in detecting an invalid URL."
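
For reference, the tests above all rely on the same Pyramid testing pattern: a throwaway configurator with settings and a DummyRequest carrying a JSON body. A minimal, self-contained sketch of that pattern (setting names and the body are illustrative, not taken from the deleted code):

```python
from pyramid import testing

# set up a throwaway configurator/registry with some settings
config = testing.setUp()
config.add_settings({'aggregator_running': False, 'malformed': False})

# build a dummy request carrying a JSON body, as the class-based views expect
request = testing.DummyRequest()
request.body = '{"action": "start"}'.encode(request.charset)

# a real test would now hand the request to a view, e.g. AggregatorController(request).put()
assert config.get_settings()['aggregator_running'] is False

testing.tearDown()
```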
diff --git a/src/service/clmcservice/aggregationapi/utilities.py b/src/service/clmcservice/aggregationapi/utilities.py
deleted file mode 100644
index 2375300730d47d2f6927961674f8064bc91bfe3a..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/aggregationapi/utilities.py
+++ /dev/null
@@ -1,180 +0,0 @@
-#!/usr/bin/python3
-"""
-// © University of Southampton IT Innovation Centre, 2018
-//
-// Copyright in this software belongs to University of Southampton
-// IT Innovation Centre of Gamma House, Enterprise Road,
-// Chilworth Science Park, Southampton, SO16 7NS, UK.
-//
-// This software may not be used, sold, licensed, transferred, copied
-// or reproduced in whole or in part in any manner or form or in or
-// on any media by any person other than in accordance with the terms
-// of the Licence Agreement supplied with the software, or otherwise
-// without the prior written consent of the copyright owners.
-//
-// This software is distributed WITHOUT ANY WARRANTY, without even the
-// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-// PURPOSE, except where stated in the Licence Agreement supplied with
-// the software.
-//
-//      Created By :            Nikolay Stanchev
-//      Created Date :          15-05-2018
-//      Created for Project :   FLAME
-"""
-
-from json import loads
-from re import compile, IGNORECASE
-from configparser import ConfigParser
-
-CONF_FILE_ATTRIBUTE = 'configuration_file_path'  # the attribute pointing to the configuration file path
-CONF_OBJECT = 'configuration_object'  # the attribute, which stores the service configuration object
-
-AGGREGATOR_CONFIG_SECTION = "AGGREGATOR"  # the section in the configuration holding all the configuration attributes declared below
-CONFIG_ATTRIBUTES = ('aggregator_report_period', 'aggregator_database_name', 'aggregator_database_url')  # all of the configuration attributes - to be used as dictionary keys
-
-RUNNING_FLAG = 'aggregator_running'  # Attribute for storing the flag, which shows whether the aggregator is running or not - to be used as a dictionary key
-
-PROCESS_ATTRIBUTE = 'aggregator_process'  # Attribute for storing the process object of the aggregator - to be used as a dictionary key
-
-# a 'malformed' running state of the aggregator is when the configuration is updated, but the aggregator has not been restarted, so it is still running with an old version of the configuration
-MALFORMED_FLAG = 'malformed'  # Attribute for storing the flag, which shows whether the aggregator is running in a malformed state or not - to be used as a dictionary key
-
-# used to indicate a malformed configuration message
-COMMENT_ATTRIBUTE = 'comment'
-COMMENT_VALUE = 'Aggregator is running in a malformed state - it uses an old version of the configuration. Please, restart it so that the updated configuration is used.'
-
-URL_REGEX = compile(
-    r'^https?://'  # http:// or https://
-    r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain, e.g. example.domain.com
-    r'localhost|'  # or localhost...
-    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # or IP address (IPv4 format)
-    r'(?::\d{2,5})?'  # optional port number
-    r'(?:[/?#][^\s]*)?$',  # URL path or query parameters
-    IGNORECASE)
-
-
-def validate_config_content(configuration):
-    """
-    A utility function to validate a configuration string representing a JSON dictionary.
-
-    :param configuration: the configuration string to validate
-    :return the validated configuration dictionary object with the values converted to their required type
-    :raise AssertionError: if the argument is not a valid configuration
-    """
-
-    global CONFIG_ATTRIBUTES
-
-    try:
-        configuration = loads(configuration)
-    except (ValueError, TypeError):
-        raise AssertionError("Configuration must be a JSON object.")
-
-    assert len(configuration) == len(CONFIG_ATTRIBUTES), "Configuration mustn't contain a different number of attributes than the number of required ones."
-
-    for attribute in CONFIG_ATTRIBUTES:
-        assert attribute in configuration, "Required attribute not found in the request content."
-
-    assert type(configuration.get('aggregator_report_period')) == int, "Report period must be an integer, received {0} instead.".format(configuration.get('aggregator_report_period'))
-
-    assert configuration.get('aggregator_report_period') > 0, "Report period must be a positive integer, received {0} instead.".format(configuration.get('aggregator_report_period'))
-
-    assert URL_REGEX.match(configuration.get('aggregator_database_url')) is not None, "The aggregator must have a valid database URL in its configuration, received {0} instead.".format(configuration.get('aggregator_database_url'))
-
-    return configuration
-
-
-def validate_action_content(content):
-    """
-    A utility function to validate a content string representing a JSON dictionary.
-
-    :param content: the content string to validate
-    :return: the validated content dictionary
-    :raise AssertionError: if the argument is not a valid json content
-    """
-
-    try:
-        content = loads(content)
-    except (ValueError, TypeError):
-        raise AssertionError("Content must be a JSON object.")
-
-    assert len(content) == 1, "Content mustn't contain more attributes than the required one."
-
-    assert content.get('action') in ('start', 'stop', 'restart'), "Action must be one of 'start', 'stop' or 'restart'."
-
-    return content
-
-
-def validate_conf_file(conf_file_path):
-    """
-    Validates the aggregator's configuration file - checks for existence of the file path, whether it can be parsed as a configuration file and
-    whether it contains the required configuration attributes.
-
-    :param conf_file_path: the configuration file path to check
-
-    :return: the parsed configuration if valid, None otherwise
-    """
-
-    global AGGREGATOR_CONFIG_SECTION, CONFIG_ATTRIBUTES
-
-    conf = ConfigParser()
-    result = conf.read(conf_file_path)
-
-    # if result doesn't contain one element, namely the conf_file_path,
-    # then the configuration file cannot be parsed for some reason (doesn't exist, cannot be opened, invalid, etc.)
-    if len(result) == 0:
-        return None
-
-    if AGGREGATOR_CONFIG_SECTION not in conf.sections():
-        return None  # the config should include a section called AGGREGATOR
-
-    for key in CONFIG_ATTRIBUTES:
-        if key not in conf[AGGREGATOR_CONFIG_SECTION]:
-            return None  # the configuration must include each configuration attribute
-
-    try:
-        int(conf[AGGREGATOR_CONFIG_SECTION]['aggregator_report_period'])
-    except ValueError:
-        return None  # the configuration must contain a valid integer for the aggregator's report period
-
-    return conf
-
-
-def generate_e2e_delay_report(path_id, source_sfr, target_sfr, endpoint, sf_instance, delay_forward, delay_reverse, delay_service, avg_request_size, avg_response_size, avg_bandwidth, time):
-    """
-    Generates a combined averaged measurement about the e2e delay and its contributing parts
-
-    :param path_id: The path identifier, which is a bidirectional path ID for the request and the response path
-    :param source_sfr: source service router
-    :param target_sfr: target service router
-    :param endpoint: endpoint of the media component
-    :param sf_instance: service function instance (media component)
-    :param delay_forward: Path delay (Forward direction)
-    :param delay_reverse: Path delay (Reverse direction)
-    :param delay_service: the media service component response time
-    :param avg_request_size: averaged request size
-    :param avg_response_size: averaged response size
-    :param avg_bandwidth: averaged bandwidth
-    :param time: measurement timestamp
-    :return: a list of dict-formatted reports to post on influx
-    """
-
-    result = [{"measurement": "e2e_delays",
-               "tags": {
-                   "path_ID": path_id,
-                   "source_SFR": source_sfr,
-                   "target_SFR": target_sfr,
-                   "endpoint": endpoint,
-                   "sf_instance": sf_instance
-               },
-               "fields": {
-                   "delay_forward": float(delay_forward),
-                   "delay_reverse": float(delay_reverse),
-                   "delay_service": float(delay_service),
-                   "avg_request_size": float(avg_request_size),
-                   "avg_response_size": float(avg_response_size),
-                   "avg_bandwidth": float(avg_bandwidth)
-               },
-               "time": int(1000000000*time)
-               }]
-
-    return result
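
The list-of-dicts structure returned by generate_e2e_delay_report() is the format accepted by the influxdb Python client's write_points(). A hedged usage sketch (host, port and database name are assumptions; field values are taken from the aggregator test data above):

```python
from time import time
from influxdb import InfluxDBClient

# a report in the format produced above
report = [{
    "measurement": "e2e_delays",
    "tags": {"path_ID": "SR1-SR3", "source_SFR": "SR1", "target_SFR": "SR3",
             "endpoint": "endpoint3", "sf_instance": "ms1.flame.org"},
    "fields": {"delay_forward": 10.0, "delay_reverse": 15.0, "delay_service": 10.0,
               "avg_request_size": 1024.0, "avg_response_size": 8.0, "avg_bandwidth": 104857600.0},
    "time": int(1000000000 * time())   # nanosecond timestamp, as in the function above
}]

client = InfluxDBClient(host="localhost", port=8086, database="CLMCMetrics")  # assumed connection details
client.write_points(report)  # posts one averaged e2e_delays point
```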
diff --git a/src/service/clmcservice/aggregationapi/views.py b/src/service/clmcservice/aggregationapi/views.py
deleted file mode 100644
index 154e3511de170ff0de2acbd885c9d2bcc8a338c7..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/aggregationapi/views.py
+++ /dev/null
@@ -1,252 +0,0 @@
-#!/usr/bin/python3
-"""
-// © University of Southampton IT Innovation Centre, 2018
-//
-// Copyright in this software belongs to University of Southampton
-// IT Innovation Centre of Gamma House, Enterprise Road,
-// Chilworth Science Park, Southampton, SO16 7NS, UK.
-//
-// This software may not be used, sold, licensed, transferred, copied
-// or reproduced in whole or in part in any manner or form or in or
-// on any media by any person other than in accordance with the terms
-// of the Licence Agreement supplied with the software, or otherwise
-// without the prior written consent of the copyright owners.
-//
-// This software is distributed WITHOUT ANY WARRANTY, without even the
-// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-// PURPOSE, except where stated in the Licence Agreement supplied with
-// the software.
-//
-//      Created By :            Nikolay Stanchev
-//      Created Date :          15-05-2018
-//      Created for Project :   FLAME
-"""
-
-from pyramid.view import view_defaults, view_config
-from pyramid.httpexceptions import HTTPBadRequest
-from subprocess import Popen
-from clmcservice.aggregationapi.utilities import validate_config_content, validate_action_content, \
-    CONF_OBJECT, CONF_FILE_ATTRIBUTE, AGGREGATOR_CONFIG_SECTION, CONFIG_ATTRIBUTES, RUNNING_FLAG, PROCESS_ATTRIBUTE, MALFORMED_FLAG, COMMENT_ATTRIBUTE, COMMENT_VALUE
-import os
-import os.path
-import sys
-import logging
-import configparser
-
-
-log = logging.getLogger('service_logger')
-
-
-@view_defaults(route_name='aggregator_config', renderer='json')
-class AggregatorConfig(object):
-    """
-    A class-based view for accessing and mutating the configuration of the aggregator.
-    """
-
-    def __init__(self, request):
-        """
-        Initialises the instance of the view with the request argument.
-
-        :param request: client's call request
-        """
-
-        self.request = request
-
-    @view_config(request_method="GET")
-    def get(self):
-        """
-        A GET API call for the configuration of the aggregator.
-
-        :return: A JSON response with the configuration of the aggregator.
-        """
-
-        aggregator_config_data = self.request.registry.settings[CONF_OBJECT]  # fetch the configuration object
-        if aggregator_config_data is None:
-            raise HTTPBadRequest("Aggregator has not been configured, yet. Send a PUT request to /aggregator/config with a JSON body of the configuration.")
-
-        config = {key: aggregator_config_data[AGGREGATOR_CONFIG_SECTION][key] for key in CONFIG_ATTRIBUTES}  # extract a json value containing the config attributes
-        config['aggregator_report_period'] = int(config['aggregator_report_period'])
-
-        return config
-
-    @view_config(request_method="PUT")
-    def put(self):
-        """
-        A PUT API call for the status of the aggregator.
-
-        :return: A JSON response to the PUT call - essentially with the new configured data and comment of the state of the aggregator
-        :raises HTTPBadRequest: if request body is not a valid JSON for the configurator
-        """
-
-        try:
-            new_config = self.request.body.decode(self.request.charset)
-            new_config = validate_config_content(new_config)  # validate the content and receive a json dictionary object
-        except AssertionError as e:
-            raise HTTPBadRequest("Bad request content. Configuration format is incorrect: {0}".format(e.args))
-
-        conf = self.request.registry.settings[CONF_OBJECT]
-        if conf is None:
-            conf = configparser.ConfigParser()
-            conf[AGGREGATOR_CONFIG_SECTION] = {}
-            self.request.registry.settings[CONF_OBJECT] = conf
-            old_config = {}
-        else:
-            # save the old configuration before updating so that it can be compared to the new one and checked for malformed state
-            old_config = {attribute: conf[AGGREGATOR_CONFIG_SECTION][attribute] for attribute in CONFIG_ATTRIBUTES}
-            old_config['aggregator_report_period'] = int(old_config['aggregator_report_period'])
-
-        for attribute in CONFIG_ATTRIBUTES:
-            conf[AGGREGATOR_CONFIG_SECTION][attribute] = str(new_config.get(attribute))  # update the configuration attributes
-
-        # if configuration is not already malformed, check whether the configuration is updated (changed in any way), if so (and the aggregator is running), malformed state is detected
-        if not self.request.registry.settings[MALFORMED_FLAG]:
-            malformed = old_config != new_config and AggregatorController.is_process_running(self.request.registry.settings.get(PROCESS_ATTRIBUTE))
-            self.request.registry.settings[MALFORMED_FLAG] = malformed
-            if malformed:
-                new_config[MALFORMED_FLAG] = True
-                new_config[COMMENT_ATTRIBUTE] = COMMENT_VALUE
-
-        self._write_conf_file()  # save the updated configuration to conf file
-        return new_config
-
-    def _write_conf_file(self):
-        """
-        Writes the configuration settings of the aggregator to a file with path stored at CONF_FILE_ATTRIBUTE
-        """
-
-        conf = self.request.registry.settings[CONF_OBJECT]
-        conf_file_path = self.request.registry.settings[CONF_FILE_ATTRIBUTE]
-        os.makedirs(os.path.dirname(conf_file_path), exist_ok=True)
-
-        log.info("Saving configuration to file {0}.".format(conf_file_path))
-        with open(conf_file_path, 'w') as configfile:
-            log.info("Opened configuration file {0}.".format(conf_file_path))
-            conf.write(configfile)
-        log.info("Successfully saved configuration to file {0}.".format(conf_file_path))
-
-
-@view_defaults(route_name='aggregator_controller', renderer='json')
-class AggregatorController(object):
-
-    """
-    A class-based view for controlling the aggregator.
-    """
-
-    def __init__(self, request):
-        """
-        Initialises the instance of the view with the request argument.
-
-        :param request: client's call request
-        """
-
-        self.request = request
-
-    @view_config(request_method="GET")
-    def get(self):
-        """
-        A GET API call for the status of the aggregator - running or not.
-
-        :return: A JSON response with the status of the aggregator.
-        """
-
-        aggregator_data = self.request.registry.settings
-        aggregator_process = aggregator_data.get(PROCESS_ATTRIBUTE)
-        aggregator_running = self.is_process_running(aggregator_process)
-
-        config = {RUNNING_FLAG: aggregator_running}
-
-        if aggregator_data[MALFORMED_FLAG] and aggregator_running:
-            config[MALFORMED_FLAG] = True
-            config[COMMENT_ATTRIBUTE] = COMMENT_VALUE
-
-        return config
-
-    @view_config(request_method="PUT")
-    def put(self):
-        """
-        A PUT API call for the status of the aggregator.
-
-        :return: A JSON response to the PUT call - essentially saying whether the aggregator is running or not
-        :raises HTTPBadRequest: if request body is not a valid JSON for the controller
-        """
-
-        content = self.request.body.decode(self.request.charset)
-
-        try:
-            content = validate_action_content(content)
-
-            conf = self.request.registry.settings[CONF_OBJECT]
-            if conf is None:
-                raise HTTPBadRequest("You must configure the aggregator before controlling it. Send a PUT request to /aggregator/config with a JSON body of the configuration.")
-
-            aggregator_config = {attribute: conf[AGGREGATOR_CONFIG_SECTION][attribute] for attribute in CONFIG_ATTRIBUTES}
-            aggregator_config['aggregator_report_period'] = int(aggregator_config['aggregator_report_period'])
-
-            action = content['action']
-
-            aggregator_running = self.is_process_running(self.request.registry.settings.get(PROCESS_ATTRIBUTE))
-            if action == 'start':
-                if not aggregator_running:
-                    process = self.start_aggregator(aggregator_config)
-                    aggregator_running = True
-                    self.request.registry.settings[PROCESS_ATTRIBUTE] = process
-                    self.request.registry.settings[MALFORMED_FLAG] = False
-            elif action == 'stop':
-                self.stop_aggregator(self.request.registry.settings.get(PROCESS_ATTRIBUTE))
-                aggregator_running = False
-                self.request.registry.settings[PROCESS_ATTRIBUTE] = None
-                self.request.registry.settings[MALFORMED_FLAG] = False
-            elif action == 'restart':
-                self.stop_aggregator(self.request.registry.settings.get(PROCESS_ATTRIBUTE))
-                process = self.start_aggregator(aggregator_config)
-                aggregator_running = True
-                self.request.registry.settings[PROCESS_ATTRIBUTE] = process
-                self.request.registry.settings[MALFORMED_FLAG] = False
-
-            return {RUNNING_FLAG: aggregator_running}
-
-        except AssertionError:
-            raise HTTPBadRequest('Bad request content - must be in JSON format: {"action": value}, where value is "start", "stop" or "restart".')
-
-    @staticmethod
-    def start_aggregator(config):
-        """
-        An auxiliary method to start the aggregator.
-
-        :param config: the configuration containing the arguments for the aggregator
-        :return: the process object of the started aggregator script
-        """
-
-        python_interpreter = sys.executable
-        command = [python_interpreter, '-m', 'clmcservice.aggregation.aggregator', '--period', str(config.get('aggregator_report_period')), '--database',
-                   config.get('aggregator_database_name'), '--url', config.get('aggregator_database_url')]
-        process = Popen(command)
-
-        log.info("\nStarted aggregator process with PID: {0}\n".format(process.pid))
-
-        return process
-
-    @staticmethod
-    def stop_aggregator(process):
-        """
-        An auxiliary method to stop the aggregator.
-
-        :param process: the process to terminate
-        """
-
-        # check if the process is started
-        if AggregatorController.is_process_running(process):
-            process.terminate()
-            log.info("\nStopped aggregator process with PID: {0}\n".format(process.pid))
-
-    @staticmethod
-    def is_process_running(process):
-        """
-        Checks if a process is running.
-
-        :param process: the Popen object to check
-        :return: True if running, False otherwise
-        """
-
-        # check if the process is started before trying to terminate it - process.poll() only returns something if the process has terminated, hence we check for a None value
-        return process is not None and process.poll() is None
diff --git a/src/service/clmcservice/alertsapi/__init__.py b/src/service/clmcservice/alertsapi/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/service/clmcservice/alertsapi/alerts_specification_schema.py b/src/service/clmcservice/alertsapi/alerts_specification_schema.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f6b1e03b947bfa0f360bc3f3e8cee14decc07f2
--- /dev/null
+++ b/src/service/clmcservice/alertsapi/alerts_specification_schema.py
@@ -0,0 +1,129 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          16-08-2018
+//      Created for Project :   FLAME
+"""
+
+# Python standard libs
+from re import compile, IGNORECASE
+
+# PIP installed libs
+from schema import Schema, And, Or, Optional, SchemaError
+
+"""
+This module defines the schema objects for the TOSCA Alert Specification:
+
+        * flame_clmc_alerts_definitions.yaml must be the only import
+        * metadata section must be present (with key-value pairs for sfc and sfci)
+        * policies section must be present (under the topology_template node)
+        * each policy must be associated with a triggers node (containing at least 1 trigger)
+        * each policy is of type eu.ict-flame.policies.StateChange or eu.ict-flame.policies.Alert
+        * each trigger must specify event_type, metric, condition, and at least one handler in action/implementation
+        * the condition section must specify threshold, granularity, aggregation_method, comparison_operator
+"""
+
+# Influx QL functions defined in the documentation https://docs.influxdata.com/influxdb/v1.6/query_language/functions/
+INFLUX_QL_FUNCTIONS = (
+    "count", "mean", "median", "mode", "sum", "first", "last", "max", "min"
+)
+
+# Kapacitor Tick Script template IDs
+TICK_SCRIPT_TEMPLATES = ("threshold", "relative", "deadman")
+
+# Allowed comparison operators and their logical values
+COMPARISON_OPERATORS = {"lt": "<", "gt": ">", "lte": "<=", "gte": ">=", "eq": "=", "neq": "<>"}
+
+# Regular expression for validating http handlers
+URL_REGEX = compile(
+    r'^https?://'  # http:// or https://
+    r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain, e.g. example.domain.com
+    r'localhost|'  # or localhost...
+    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # or IP address (IPv4 format)
+    r'(?::\d{2,5})?'  # optional port number
+    r'(?:[/?#][^\s]*)?$',  # URL path or query parameters
+    IGNORECASE)
+
+# Global tags allowed to be used for filtering in the trigger condition
+CLMC_INFORMATION_MODEL_GLOBAL_TAGS = {"flame_sfc", "flame_sfci", "flame_sfp", "flame_sf", "flame_sfe", "flame_server", "flame_location"}
+
+ALERTS_SPECIFICATION_SCHEMA = Schema({
+    "tosca_definitions_version": And(str, lambda v: v == "tosca_simple_profile_for_nfv_1_0_0"),
+    Optional("description"): str,
+    "imports": And([lambda s: s.endswith("flame_clmc_alerts_definitions.yaml")], lambda l: len(l) == 1),
+    "metadata": {
+        "sfc": str,
+        "sfci": str
+    },
+    "topology_template": {
+        "policies": [
+            {
+                str: {
+                    "type": Or("eu.ict-flame.policies.StateChange", "eu.ict-flame.policies.Alert"),
+                    "triggers": And({
+                        str: {
+                            Optional("description"): str,
+                            "event_type": And(str, lambda s: s in TICK_SCRIPT_TEMPLATES),
+                            "metric": And(str, lambda s: len(s.split('.', 1)) == 2),
+                            "condition": {
+                                "threshold": Or(int, float),
+                                "granularity": int,
+                                Optional("aggregation_method"): And(str, lambda s: s in INFLUX_QL_FUNCTIONS),
+                                Optional("resource_type"): {
+                                    And(str, lambda s: s in CLMC_INFORMATION_MODEL_GLOBAL_TAGS): str
+                                },
+                                Optional("comparison_operator"): And(str, lambda s: s in COMPARISON_OPERATORS)
+                            },
+                            "action": {
+                                "implementation":
+                                    [
+                                        And(str, lambda s: URL_REGEX.match(s) is not None)
+                                    ]
+                            }
+                        }
+                    }, lambda l: len(l) > 0)
+                }
+            }
+        ]
+    }
+})
+
+
+def validate_clmc_alerts_specification(tosca_yaml_tpl, include_error=False):
+    """
+    CLMC validation of the TOSCA alerts specification, using the schema defined in this module.
+
+    :param tosca_yaml_tpl: the TOSCA template to validate (as a Python dictionary object)
+    :param include_error: a flag indicating whether the output of the function should include a caught SchemaError
+        (if set to True and no error is thrown, None is returned as the error object)
+
+    :return: True/False depending on whether tosca_yaml_tpl is valid; if include_error is set to True, a tuple of the validity flag and the error (None if no error) thrown during validation
+    """
+
+    try:
+        ALERTS_SPECIFICATION_SCHEMA.validate(tosca_yaml_tpl)
+        valid, err = True, None
+    except SchemaError as e:
+        valid, err = False, e
+
+    if include_error:
+        return valid, err
+    else:
+        return valid
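
A minimal usage sketch of the validator above; the policy and trigger names, the metadata values and the handler URL are hypothetical and exist only for illustration:

```python
from clmcservice.alertsapi.alerts_specification_schema import validate_clmc_alerts_specification

# A hypothetical, minimal alerts specification already parsed to a Python dictionary.
alerts_spec = {
    "tosca_definitions_version": "tosca_simple_profile_for_nfv_1_0_0",
    "imports": ["flame_clmc_alerts_definitions.yaml"],
    "metadata": {"sfc": "MSDemo", "sfci": "MSDemo-premium"},
    "topology_template": {
        "policies": [
            {
                "latency_policy": {
                    "type": "eu.ict-flame.policies.StateChange",
                    "triggers": {
                        "latency_breach": {
                            "event_type": "threshold",
                            "metric": "network.latency",
                            "condition": {
                                "threshold": 45,
                                "granularity": 120,
                                "aggregation_method": "mean",
                                "comparison_operator": "gt"
                            },
                            "action": {"implementation": ["http://example-handler.flame.eu/notify"]}
                        }
                    }
                }
            }
        ]
    }
}

valid, error = validate_clmc_alerts_specification(alerts_spec, include_error=True)
assert valid and error is None
```
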
diff --git a/src/service/clmcservice/alertsapi/tests.py b/src/service/clmcservice/alertsapi/tests.py
new file mode 100644
index 0000000000000000000000000000000000000000..09e4d61c071f8b6c47f36a4d2af7761459241d68
--- /dev/null
+++ b/src/service/clmcservice/alertsapi/tests.py
@@ -0,0 +1,322 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          14-08-2018
+//      Created for Project :   FLAME
+"""
+
+
+# Python standard libs
+from os import listdir
+from os.path import isfile, join, dirname
+from urllib.parse import urlparse
+
+# PIP installed libs
+import pytest
+from pyramid.httpexceptions import HTTPBadRequest
+from yaml import load
+from pyramid import testing
+from requests import get, delete
+from toscaparser.tosca_template import ToscaTemplate
+
+# CLMC-service imports
+from clmcservice.alertsapi.utilities import adjust_tosca_definitions_import
+from clmcservice.alertsapi.alerts_specification_schema import validate_clmc_alerts_specification
+from clmcservice.alertsapi.views import AlertsConfigurationAPI
+from clmcservice import ROOT_DIR
+
+
+class TestAlertsConfigurationAPI(object):
+    """
+    A pytest-implementation test for the Alerts Configuration API endpoints.
+    """
+
+    @pytest.fixture(autouse=True)
+    def print_fixture(self):
+        """
+        Fixture to adjust the printing format when running pytest with the "-s" flag - by default print messages mix up with pytest's output
+        """
+
+        print()
+
+    @pytest.fixture()
+    def app_config(self):
+        """
+        A fixture to implement setUp/tearDown functionality for all tests by initializing the configuration structure for the web service
+        """
+
+        self.registry = testing.setUp()
+        self.registry.add_settings({"kapacitor_host": "localhost", "kapacitor_port": 9092})
+
+        yield
+
+        testing.tearDown()
+
+    @pytest.mark.parametrize("sfc_id, sfci_id, policy_id, trigger_id, task_id, topic_id", [
+        ("MSDemo", "MSDemo-premium", "requests_diff", "low_requests", "094f23d6e948c78e9fa215528973fb3aeefa5525898626c9ea049dc8e87a7388", "094f23d6e948c78e9fa215528973fb3aeefa5525898626c9ea049dc8e87a7388"),
+        ("sfc", "sfc_1", "missing_measurements", "no_latency_measurements", "1019ab70402a98905ff9e950c36d3c13a8468db8f7b8196f4ee69d6c39947fe5", "1019ab70402a98905ff9e950c36d3c13a8468db8f7b8196f4ee69d6c39947fe5"),
+        ("disney.sfc.flame.eu", "disney.sfc.flame.eu-premium", "rtt_deviation", "increase_in_rtt", "8b069cc10571f887ff6c3a14aea11e68e664ddde5bc6af08d88058865510af93", "8b069cc10571f887ff6c3a14aea11e68e664ddde5bc6af08d88058865510af93"),
+        ("vrt/novel/sfc", "vrt/novel/sfc-prototype", "deadmen_measurement", "missing_storage_measurements", "8c1aba1c61c184809d648e4ab323b9b48606847bb2e3d54bfbdf34d8a7db91d1", "8c1aba1c61c184809d648e4ab323b9b48606847bb2e3d54bfbdf34d8a7db91d1"),
+        ("vrt/novel/sfc", "vrt/novel/sfc-deployment", "deadmen_measurement", "missing_storage_measurements", "a61dbe20cbef19be384982b51e22d83f23c4e10aeede172ef8646c515b836f7b", "a61dbe20cbef19be384982b51e22d83f23c4e10aeede172ef8646c515b836f7b")
+    ])
+    def test_alerts_hash_getter(self, sfc_id, sfci_id, policy_id, trigger_id, task_id, topic_id):
+        """
+        Tests the GET method for retrieving the task and topic ID given the sfc, sfc instance, policy id and trigger id.
+        """
+
+        request = testing.DummyRequest()
+        request.params["sfc"] = sfc_id
+        request.params["sfci"] = sfci_id
+        request.params["policy"] = policy_id
+        request.params["trigger"] = trigger_id
+
+        response = AlertsConfigurationAPI(request).get_alerts_hash()
+
+        assert response["task_identifier"] == task_id, "Incorrect task identifier returned"
+        assert response["task_api_endpoint"] == "/kapacitor/v1/tasks/{0}".format(task_id), "Incorrect task API endpoint returned."
+        assert response["topic_identifier"] == topic_id, "Incorrect topic identifier returned"
+        assert response["topic_api_endpoint"] == "/kapacitor/v1/alerts/topics/{0}".format(topic_id), "Incorrect topic API endpoint returned"
+        assert response["topic_handlers_api_endpoint"] == "/kapacitor/v1/alerts/topics/{0}/handlers".format(topic_id), "Incorrect topic handlers API endpoint returned"
+
+    def test_alerts_config_tosca_parsing(self):
+        """
+        Tests that what we consider a valid/invalid alerts specification is successfully/unsuccessfully parsed by the TOSCA-parser.
+        """
+
+        for path_suffix, valid_expected in (("valid", True), ("invalid", False)):
+            test_data_path = join(dirname(ROOT_DIR), *["resources", "tosca", "test-data", "tosca-parser", path_suffix])
+
+            for test_file_path in listdir(test_data_path):
+                alert_config_abs_path = join(test_data_path, test_file_path)
+
+                if not isfile(alert_config_abs_path):
+                    continue  # skip directories
+
+                print(alert_config_abs_path, valid_expected)
+
+                with open(alert_config_abs_path, 'r') as fh:
+                    yaml_content = load(fh)
+                    adjust_tosca_definitions_import(yaml_content)
+
+                valid_real = True
+                try:
+                    ToscaTemplate(yaml_dict_tpl=yaml_content)
+                except Exception:
+                    valid_real = False
+
+                assert valid_expected == valid_real, "TOSCA parser test failed for file: {0}".format(alert_config_abs_path)
+
+    def test_alerts_config_clmc_validation(self):
+        """
+        Tests the custom CLMC validation of the TOSCA alerts specification.
+        """
+
+        for path_suffix, valid_expected in (("valid", True), ("invalid", False)):
+            test_data_path = join(dirname(ROOT_DIR), *["resources", "tosca", "test-data", "clmc-validator", path_suffix])
+
+            for test_file_path in listdir(test_data_path):
+                alert_config_abs_path = join(test_data_path, test_file_path)
+
+                if not isfile(alert_config_abs_path):
+                    continue  # skip directories
+
+                print(alert_config_abs_path, valid_expected)
+
+                with open(alert_config_abs_path, 'r') as fh:
+                    yaml_content = load(fh)
+                    adjust_tosca_definitions_import(yaml_content)
+
+                # do not catch exceptions here since we are testing the clmc validator, the tosca parsing is tested in the previous test method
+                alert_tosca_spec = ToscaTemplate(yaml_dict_tpl=yaml_content)
+                valid_real, err = validate_clmc_alerts_specification(alert_tosca_spec.tpl, include_error=True)
+                assert valid_expected == valid_real, "CLMC alerts specification validator test failed for file: {0}".format(alert_config_abs_path)
+
+    def test_alerts_config_api_post(self, app_config):
+        """
+        Tests the POST API endpoint of the alerts configuration API responsible for receiving alerts specifications.
+
+        Test steps are:
+            * Traverse all valid TOSCA Alerts Specifications in the
+                src/service/clmcservice/resources/tosca/test-data/clmc-validator/valid and src/service/clmcservice/resources/tosca/test-data/tosca-parser/valid directories
+            * Send each valid TOSCA Alerts Specification to the view responsible for configuring Kapacitor
+            * Check that Kapacitor alerts, topics and handlers are created with the correct identifiers and arguments
+
+        :param app_config: fixture for setUp/tearDown of the web service registry
+        """
+
+        test_folder = "clmc-validator"
+        alerts_test_data_path = join(dirname(ROOT_DIR), *["resources", "tosca", "test-data", test_folder, "valid"])
+        resources_test_data_path = join(dirname(ROOT_DIR), *["resources", "tosca", "test-data", "resource-spec"])
+
+        for alerts_test_file in listdir(alerts_test_data_path):
+            alert_spec_abs_path = join(alerts_test_data_path, alerts_test_file)
+
+            if not isfile(alert_spec_abs_path):
+                continue  # skip directories
+
+            print("Testing file {0} in folder {1}".format(alerts_test_file, test_folder))
+
+            valid_resources_test_file = alerts_test_file.replace("alerts", "resources_valid")
+            invalid_resources_test_file = alerts_test_file.replace("alerts", "resources_invalid")
+            valid_resource_spec_abs_path = join(resources_test_data_path, valid_resources_test_file)
+            invalid_resource_spec_abs_path = join(resources_test_data_path, invalid_resources_test_file)
+
+            print("Test uses resource spec. files {0} and {1}".format(valid_resources_test_file, invalid_resources_test_file))
+
+            with open(alert_spec_abs_path) as alert_spec:
+                # first send an inconsistent resource spec
+                with open(invalid_resource_spec_abs_path) as invalid_resource_spec:
+                    request = testing.DummyRequest()
+                    request.POST['alert-spec'] = FieldStorageMock(alerts_test_file, alert_spec)
+                    request.POST['resource-spec'] = FieldStorageMock(invalid_resources_test_file, invalid_resource_spec)
+                    try:
+                        AlertsConfigurationAPI(request).post_alerts_specification()
+                        assert False, "No error was returned even though an inconsistent resource specification was sent"
+                    except HTTPBadRequest:
+                        pass  # we expect this to happen
+
+                alert_spec.seek(0)
+                # then send a consistent resource spec
+                with open(valid_resource_spec_abs_path) as valid_resource_spec:
+                    request = testing.DummyRequest()
+                    sfc, sfc_instance, alert_ids, topic_handlers = extract_alert_spec_data(alert_spec)
+                    alert_spec.seek(0)
+                    request.POST['alert-spec'] = FieldStorageMock(alerts_test_file, alert_spec)  # a simple mock class is used to mimic the FieldStorage class
+                    request.POST['resource-spec'] = FieldStorageMock(valid_resources_test_file, valid_resource_spec)
+                    clmc_service_response = AlertsConfigurationAPI(request).post_alerts_specification()
+
+            assert (sfc, sfc_instance) == (clmc_service_response["service_function_chain_id"], clmc_service_response["service_function_chain_instance_id"]), \
+                "Incorrect extraction of metadata for file {0}". format(alerts_test_file)
+            assert "triggers_specification_errors" not in clmc_service_response, "Unexpected error was returned for triggers specification"
+            assert "triggers_action_errors" not in clmc_service_response, "Unexpected error was returned for handlers specification"
+
+            # traverse through all alert IDs and check that they are created within Kapacitor
+            for alert_id in alert_ids:
+                kapacitor_response = get("http://localhost:9092/kapacitor/v1/tasks/{0}".format(alert_id))
+                assert kapacitor_response.status_code == 200, "Alert with ID {0} was not created - test file {1}.".format(alert_id, alerts_test_file)
+                kapacitor_response_json = kapacitor_response.json()
+                assert "link" in kapacitor_response_json, "Incorrect response from kapacitor for alert with ID {0} - test file {1}".format(alert_id, alerts_test_file)
+                assert kapacitor_response_json["status"] == "enabled", "Alert with ID {0} was created but is disabled - test file {1}".format(alert_id, alerts_test_file)
+                assert kapacitor_response_json["executing"], "Alert with ID {0} was created and is enabled, but is not executing - test file {1}".format(alert_id, alerts_test_file)
+
+            # check that all topic IDs were registered within Kapacitor
+            topic_ids = list(topic_handlers.keys())
+            kapacitor_response = get("http://localhost:9092/kapacitor/v1/alerts/topics")
+            assert kapacitor_response.status_code == 200, "Kapacitor couldn't return the list of created topics - test file {0}".format(alerts_test_file)
+            kapacitor_response_json = kapacitor_response.json()
+            kapacitor_defined_topics = [topic["id"] for topic in kapacitor_response_json["topics"]]
+            assert set(topic_ids).issubset(kapacitor_defined_topics), "Not all topic IDs were created within kapacitor - test file {0}".format(alerts_test_file)
+
+            # check that all handler IDs were created and each of them is subscribed to the correct topic ID
+            for topic_id in topic_handlers:
+                for handler_id, handler_url in topic_handlers[topic_id]:
+                    kapacitor_response = get("http://localhost:9092/kapacitor/v1/alerts/topics/{0}/handlers/{1}".format(topic_id, handler_id))
+                    assert kapacitor_response.status_code == 200, "Handler with ID {0} for topic with ID {1} doesn't exist - test file {2}".format(handler_id, topic_id, alerts_test_file)
+                    kapacitor_response_json = kapacitor_response.json()
+                    assert kapacitor_response_json["id"] == handler_id, "Incorrect ID of handler {0} in the Kapacitor response - test file {1}".format(handler_id, alerts_test_file)
+                    assert kapacitor_response_json["kind"] == "post", "Incorrect kind of handler {0} in the Kapacitor response - test file {1}".format(handler_id, alerts_test_file)
+                    assert kapacitor_response_json["options"]["url"], "Incorrect url of handler {0} in the Kapacitor response - test file {1}".format(handler_id, alerts_test_file)
+
+            # send the same spec again to check that error messages are returned (because of ID duplication)
+            with open(alert_spec_abs_path) as alert_spec:
+                with open(valid_resource_spec_abs_path) as valid_resource_spec:
+                    request.POST['alert-spec'] = FieldStorageMock(alerts_test_file, alert_spec)  # a simple mock class is used to mimic the FieldStorage class
+                    request.POST['resource-spec'] = FieldStorageMock(valid_resources_test_file, valid_resource_spec)
+                    clmc_service_response = AlertsConfigurationAPI(request).post_alerts_specification()
+            assert (sfc, sfc_instance) == (clmc_service_response["service_function_chain_id"], clmc_service_response["service_function_chain_instance_id"]), \
+                "Incorrect extraction of metadata for file {0}". format(alerts_test_file)
+
+            assert len(clmc_service_response["triggers_specification_errors"]) == len(alert_ids), "Expected errors were not returned for triggers specification"
+            handlers_count = sum([len(topic_handlers[topic]) for topic in topic_handlers])
+            assert len(clmc_service_response["triggers_action_errors"]) == handlers_count, "Expected errors were not returned for handlers specification"
+
+            clear_kapacitor_alerts(alert_ids, topic_handlers)
+
+
+class FieldStorageMock(object):
+
+    def __init__(self, filename, file):
+        """
+        Used to mock the behaviour of the cgi.FieldStorage class - only the two attributes needed to forward a file to the view.
+
+        :param filename: file name
+        :param file: file object
+        """
+
+        self.filename = filename
+        self.file = file
+
+
+def extract_alert_spec_data(alert_spec):
+    """
+    A utility function to extract the expected alert, handler and topic identifiers from a given alert specification.
+
+    :param alert_spec: the alert specification file (file object)
+    :return: a tuple containing sfc_id and sfc_instance_id along with a list and a dictionary of generated IDs (alert IDs (list), topic IDs linked to handler IDs (dict))
+    """
+
+    yaml_alert_spec = load(alert_spec)
+    adjust_tosca_definitions_import(yaml_alert_spec)
+    tosca_tpl = ToscaTemplate(yaml_dict_tpl=yaml_alert_spec)
+    sfc, sfc_instance = tosca_tpl.tpl["metadata"]["sfc"], tosca_tpl.tpl["metadata"]["sfci"]
+
+    alert_ids = []  # saves all alert IDs in a list
+    topic_handlers = {}  # saves all topics in a dictionary, each topic is linked to a list of handler pairs (a handler pair consists of handler id and handler url)
+
+    for policy in tosca_tpl.policies:
+        policy_id = policy.name
+        for trigger in policy.triggers:
+            trigger_id = trigger.name
+
+            topic_id = "{0}\n{1}\n{2}\n{3}".format(sfc, sfc_instance, policy_id, trigger_id)
+            topic_id = AlertsConfigurationAPI.get_hash(topic_id)
+            topic_handlers[topic_id] = []
+
+            alert_id = topic_id
+            alert_ids.append(alert_id)
+
+            for handler_url in trigger.trigger_tpl["action"]["implementation"]:
+                handler_host = urlparse(handler_url).hostname
+                handler_id = "{0}\n{1}\n{2}\n{3}\n{4}".format(sfc, sfc_instance, policy_id, trigger_id, handler_host)
+                handler_id = AlertsConfigurationAPI.get_hash(handler_id)
+                topic_handlers[topic_id].append((handler_id, handler_url))
+
+    return sfc, sfc_instance, alert_ids, topic_handlers
+
+
+def clear_kapacitor_alerts(alert_ids, topic_handlers):
+    """
+    A utility function to clean up Kapacitor from the configured alerts, topics and handlers.
+
+    :param alert_ids: the list of alert IDs to delete
+    :param topic_handlers: the dictionary of topic and handlers to delete
+    """
+
+    for alert_id in alert_ids:
+        kapacitor_response = delete("http://localhost:9092/kapacitor/v1/tasks/{0}".format(alert_id))  # delete alert
+        assert kapacitor_response.status_code == 204
+
+    for topic_id in topic_handlers:
+        for handler_id, handler_url in topic_handlers[topic_id]:
+            kapacitor_response = delete("http://localhost:9092/kapacitor/v1/alerts/topics/{0}/handlers/{1}".format(topic_id, handler_id))  # delete handler
+            assert kapacitor_response.status_code == 204
+
+        kapacitor_response = delete("http://localhost:9092/kapacitor/v1/alerts/topics/{0}".format(topic_id))  # delete topic
+        assert kapacitor_response.status_code == 204
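
The expected task, topic and handler identifiers used in the parameterised test above can be reproduced as sketched below, assuming AlertsConfigurationAPI.get_hash() computes a SHA-256 hex digest of the UTF-8 encoded identifier string (consistent with the 64-character hexadecimal values in the test data):

```python
from hashlib import sha256

sfc, sfci, policy, trigger = "MSDemo", "MSDemo-premium", "requests_diff", "low_requests"

# topic and task identifiers share the same hash of "<sfc>\n<sfci>\n<policy>\n<trigger>"
topic_id = sha256("{0}\n{1}\n{2}\n{3}".format(sfc, sfci, policy, trigger).encode("utf-8")).hexdigest()
task_id = topic_id

# handler identifiers additionally include the hostname of the handler URL ("localhost" is illustrative)
handler_id = sha256("{0}\n{1}\n{2}\n{3}\n{4}".format(sfc, sfci, policy, trigger, "localhost").encode("utf-8")).hexdigest()
```
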
diff --git a/src/service/clmcservice/alertsapi/utilities.py b/src/service/clmcservice/alertsapi/utilities.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2eec79beb2af43a3ec605656264da70bbbcbec9
--- /dev/null
+++ b/src/service/clmcservice/alertsapi/utilities.py
@@ -0,0 +1,334 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          16-08-2018
+//      Created for Project :   FLAME
+"""
+
+
+# Python standard libs
+from os.path import join
+
+# PIP installed libs
+from yaml import load
+
+# CLMC-service imports
+from clmcservice import ROOT_DIR
+
+
+CLMC_ALERTS_TOSCA_DEFINITIONS_REL_PATH = ["static", "flame_clmc_alerts_definitions.yaml"]
+
+CLMC_ALERTS_TOSCA_DEFINITIONS_ABS_PATH = join(ROOT_DIR, *CLMC_ALERTS_TOSCA_DEFINITIONS_REL_PATH)
+
+CLMC_ALERTS_TOSCA_DEFINITIONS_FILE = CLMC_ALERTS_TOSCA_DEFINITIONS_REL_PATH[-1]
+
+
+def adjust_tosca_definitions_import(alert_spec):
+    """
+    A utility function to adjust any imports of flame_clmc_alerts_definitions.yaml to point to the correct location of
+    the tosca definitions file.
+
+    :param alert_spec: the TOSCA alert specification content (yaml dict)
+    """
+
+    global CLMC_ALERTS_TOSCA_DEFINITIONS_ABS_PATH
+
+    try:
+        import_index = alert_spec["imports"].index(CLMC_ALERTS_TOSCA_DEFINITIONS_FILE)
+        alert_spec["imports"][import_index] = CLMC_ALERTS_TOSCA_DEFINITIONS_ABS_PATH
+    except Exception:
+        pass  # nothing to replace if the import is not specified (either the imports section is missing, or there is no reference to the clmc tosca definitions file)
+
+
+def get_resource_spec_topic_ids(resource_spec_reference):
+    """
+    Tries to extract all event identifiers from a TOSCA resource specification
+
+    :param resource_spec_reference: the resource specification file reference from the POST HTTP request
+
+    :return: sfc ID, sfc instance ID and the list of topic IDs
+    """
+
+    resource_spec = load(resource_spec_reference.file)
+
+    topic_ids = []
+    sfc, sfc_i = resource_spec["metadata"]["sfc"], resource_spec["metadata"]["sfci"]
+
+    policies = resource_spec["topology_template"]["policies"]
+    for policy in policies:
+        policy = list(policy.items())[0]
+        policy_id, policy_object = policy[0], policy[1]
+
+        if policy_object["type"] == "eu.ict-flame.policies.StateChange":
+            triggers = policy_object["triggers"]
+
+            for trigger in triggers.values():
+                event = trigger["condition"]["constraint"]
+                source, event_id = event.split("::")
+                if source.lower() == "clmc":  # only take those event IDs that have clmc set as their source
+                    topic_ids.append("{0}\n{1}".format(policy_id, event_id))
+
+    return sfc, sfc_i, topic_ids
+
+
+def get_alert_spec_topic_ids(alerts_spec_tpl):
+    """
+    Tries to extract all event identifiers from a TOSCA alerts specification
+
+    :param alerts_spec_tpl: the alerts specification TOSCA template object
+
+    :return: the list of topic IDs
+    """
+
+    topic_ids = []
+
+    for policy in alerts_spec_tpl.policies:
+        policy_id = policy.name
+
+        for trigger in policy.triggers:
+            trigger_id = trigger.name
+
+            topic_id = "{0}\n{1}".format(policy_id, trigger_id)
+            topic_ids.append(topic_id)
+
+    return topic_ids
+
+
+def fill_http_post_handler_vars(handler_id, handler_url):
+    """
+    Creates a dictionary object ready to be posted to kapacitor to create an alert handler.
+
+    :param handler_id: handler identifier
+    :param handler_url: url to post alerts to
+
+    :return: a dictionary object ready to be posted to kapacitor to create an alert handler.
+    """
+
+    return {
+        "id": handler_id,
+        "kind": "post",
+        "options": {
+            "url": handler_url
+        }
+    }
+
+
+class TICKScriptTemplateFiller:
+    """
+    A utility class used for filling in the variables of TICK script templates.
+    """
+
+    # a class variable holding the comparison operator used to build the where clause in TICK script templates;
+    # the operator differs depending on whether the where clause is built as a string or as a lambda
+    _TEMPLATE_COMPARISON_OPERATOR = {"threshold": "=", "relative": "=", "deadman": "=="}
+
+    @staticmethod
+    def get_comparison_operator(template_type):
+        """
+        Get the correct comparison operator depending on the template type; if the template type is not recognized, return "==".
+
+        :param template_type: one of the template types, that are created within kapacitor
+
+        :return: the comparison operator that should be used in the template to build the where clause
+        """
+
+        return TICKScriptTemplateFiller._TEMPLATE_COMPARISON_OPERATOR.get(template_type, "==")
+
+    @staticmethod
+    def fill_template_vars(template_type, **kwargs):
+        """
+        A utility function acting as an entry point to the fill_<template_type>_template_vars() functions defined below.
+
+        :param template_type: the template type - e.g. "threshold", "relative" or "deadman"
+        :param kwargs: keyword arguments to forward to the actual function that will be used
+
+        :return: the result of the actual function that will be used.
+        """
+
+        fill_function_name = "_fill_{0}_template_vars".format(template_type)
+        fill_function = getattr(TICKScriptTemplateFiller, fill_function_name)  # python functions are first-class objects !
+
+        return fill_function(**kwargs)
+
+    @staticmethod
+    def _fill_threshold_template_vars(db=None, measurement=None, field=None, influx_function=None, critical_value=None,
+                                      comparison_operator=None, alert_period=None, topic_id=None, where_clause=None, **kwargs):
+        """
+        Creates a dictionary object ready to be posted to kapacitor to create a "threshold" task from template.
+
+        :param db: db name
+        :param measurement: measurement name
+        :param field: field name
+        :param influx_function: influx function to use for querying
+        :param critical_value: critical value to compare with
+        :param comparison_operator: type of comparison
+        :param alert_period: alert period to query influx
+        :param topic_id: topic identifier
+        :param where_clause: (OPTIONAL) argument for filtering the influx query by tag values
+
+        :return: a dictionary object ready to be posted to kapacitor to create a "threshold" task from template.
+        """
+
+        comparison_lambda = '"real_value" {0} {1}'.format(comparison_operator, critical_value)  # build up lambda string, e.g. "real_value" >= 10
+
+        template_vars = {
+            "db": {
+                "type": "string",
+                "value": db
+            },
+            "measurement": {
+                "type": "string",
+                "value": measurement
+            },
+            "field": {
+                "type": "string",
+                "value": field
+            },
+            "influxFunction": {
+                "type": "string",
+                "value": influx_function
+            },
+            "comparisonLambda": {
+                "type": "lambda",
+                "value": comparison_lambda
+            },
+            "alertPeriod": {
+                "type": "duration",
+                "value": alert_period
+            },
+            "topicID": {
+                "type": "string",
+                "value": topic_id
+            }
+        }
+
+        if where_clause is not None:
+            template_vars["whereClause"] = {
+                "type": "string",
+                "value": where_clause
+            }
+
+        return template_vars
+
+    @staticmethod
+    def _fill_relative_template_vars(db=None, measurement=None, field=None, influx_function=None, critical_value=None, comparison_operator=None,
+                                     alert_period=None, topic_id=None, where_clause=None, **kwargs):
+        """
+        Creates a dictionary object ready to be posted to kapacitor to create a "relative" task from template.
+
+        :param db: db name
+        :param measurement: measurement name
+        :param field: field name
+        :param influx_function: influx function to use for querying
+        :param critical_value: critical value to compare with
+        :param comparison_operator: type of comparison
+        :param alert_period: alert period to use for relative comparison
+        :param topic_id: topic identifier
+        :param where_clause: (OPTIONAL) argument for filtering the influx query by tag values
+
+        :return: a dictionary object ready to be posted to kapacitor to create a "relative" task from template.
+        """
+
+        comparison_lambda = '"diff" {0} {1}'.format(comparison_operator, critical_value)
+
+        template_vars = {
+            "db": {
+                "type": "string",
+                "value": db
+            },
+            "measurement": {
+                "type": "string",
+                "value": measurement
+            },
+            "field": {
+                "type": "string",
+                "value": field
+            },
+            "influxFunction": {
+                "type": "string",
+                "value": influx_function
+            },
+            "comparisonLambda": {
+                "type": "lambda",
+                "value": comparison_lambda
+            },
+            "alertPeriod": {
+                "type": "duration",
+                "value": alert_period
+            },
+            "topicID": {
+                "type": "string",
+                "value": topic_id
+            }
+        }
+
+        if where_clause is not None:
+            template_vars["whereClause"] = {
+                "type": "string",
+                "value": where_clause
+            }
+
+        return template_vars
+
+    @staticmethod
+    def _fill_deadman_template_vars(db=None, measurement=None, critical_value=None, alert_period=None, topic_id=None, where_clause=None, **kwargs):
+        """
+        Creates a dictionary object ready to be posted to kapacitor to create a "deadman" task from template.
+
+        :param db: db name
+        :param measurement: measurement name
+        :param critical_value: critical value to compare with
+        :param alert_period: alert period over which the measurement throughput is monitored
+        :param topic_id: topic identifier
+        :param where_clause: (OPTIONAL) argument for filtering the influx query by tag values
+
+        :return: a dictionary object ready to be posted to kapacitor to create a "deadman" task from template.
+        """
+
+        template_vars = {
+            "db": {
+                "type": "string",
+                "value": db
+            },
+            "measurement": {
+                "type": "string",
+                "value": measurement
+            },
+            "alertPeriod": {
+                "type": "duration",
+                "value": alert_period
+            },
+            "throughputThreshold": {
+                "type": "float",
+                "value": critical_value
+            },
+            "topicID": {
+                "type": "string",
+                "value": topic_id
+            }
+        }
+
+        if where_clause is not None:
+            template_vars["whereClause"] = {
+                "type": "lambda",
+                "value": where_clause
+            }
+
+        return template_vars
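
A usage sketch of the entry point above for a "threshold" alert; the concrete values (database, measurement, period, topic identifier and filter string) are illustrative only:

```python
from clmcservice.alertsapi.utilities import TICKScriptTemplateFiller
from clmcservice.alertsapi.alerts_specification_schema import COMPARISON_OPERATORS

# illustrative arguments mirroring the parameters of _fill_threshold_template_vars()
template_vars = TICKScriptTemplateFiller.fill_template_vars(
    "threshold",
    db="MSDemo",                                      # database named after the service function chain
    measurement="network",
    field="latency",
    influx_function="mean",
    critical_value=45,
    comparison_operator=COMPARISON_OPERATORS["gt"],   # translated to ">"
    alert_period="120s",                              # a Kapacitor duration string
    topic_id="<sha-256 topic identifier>",
    where_clause='"flame_sfc"=\'MSDemo\''             # optional tag filter, built by the caller
)

# template_vars is intended as the "vars" object sent to Kapacitor when the CLMC service
# creates a task from the "threshold-template" template via the /kapacitor/v1/tasks endpoint
```
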
diff --git a/src/service/clmcservice/alertsapi/views.py b/src/service/clmcservice/alertsapi/views.py
new file mode 100644
index 0000000000000000000000000000000000000000..34490678d30774d4fee6ea445257d59113b7633c
--- /dev/null
+++ b/src/service/clmcservice/alertsapi/views.py
@@ -0,0 +1,321 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          14-08-2018
+//      Created for Project :   FLAME
+"""
+
+# Python standard libs
+import logging
+from urllib.parse import urlparse
+from hashlib import sha256
+
+# PIP installed libs
+from pyramid.httpexceptions import HTTPBadRequest
+from pyramid.view import view_defaults, view_config
+from yaml import load, YAMLError
+from toscaparser.tosca_template import ToscaTemplate
+from requests import post
+
+# CLMC-service imports
+from clmcservice.alertsapi.utilities import adjust_tosca_definitions_import, TICKScriptTemplateFiller, fill_http_post_handler_vars, get_resource_spec_topic_ids, get_alert_spec_topic_ids
+from clmcservice.alertsapi.alerts_specification_schema import COMPARISON_OPERATORS, validate_clmc_alerts_specification
+
+# initialise logger
+log = logging.getLogger('service_logger')
+
+
+@view_defaults(renderer='json')
+class AlertsConfigurationAPI(object):
+    """
+    A class-based view for configuring alerts within CLMC.
+    """
+
+    def __init__(self, request):
+        """
+        Initialises the instance of the view with the request argument.
+
+        :param request: client's call request
+        """
+
+        self.request = request
+
+    @view_config(route_name='alerts_configuration', request_method='GET')
+    def get_alerts_hash(self):
+        """
+        The GET API call for retrieving the generated task/topic identifiers and their Kapacitor API endpoints for a given sfc, sfci, policy and trigger.
+
+        :return: a JSON response with the task identifier, the topic identifier and the corresponding Kapacitor API endpoints
+        :raises HTTPBadRequest: if any of the required query string parameters (sfc, sfci, policy, trigger) is missing
+        """
+
+        for param in ("sfc", "sfci", "policy", "trigger"):
+            if param not in self.request.params:
+                raise HTTPBadRequest("Missing url query string parameter {0}.".format(param))
+
+        sfc = self.request.params["sfc"]
+        sfci = self.request.params["sfci"]
+        policy = self.request.params["policy"]
+        trigger = self.request.params["trigger"]
+
+        # generate topic and alert identifiers
+        topic_id = "{0}\n{1}\n{2}\n{3}".format(sfc, sfci, policy, trigger)
+        topic_id = self.get_hash(topic_id)
+        alert_id = topic_id
+
+        return {
+            "task_identifier": alert_id,
+            "task_api_endpoint": "/kapacitor/v1/tasks/{0}".format(alert_id),
+            "topic_identifier": topic_id,
+            "topic_api_endpoint": "/kapacitor/v1/alerts/topics/{0}".format(topic_id),
+            "topic_handlers_api_endpoint": "/kapacitor/v1/alerts/topics/{0}/handlers".format(topic_id)
+        }
+
+    @view_config(route_name='alerts_configuration', request_method='POST')
+    def post_alerts_specification(self):
+        """
+        The view for receiving and configuring alerts based on the TOSCA alerts specification document. This endpoint must also receive the TOSCA resources specification document for validation.
+
+        :raises HTTPBadRequest: if the request doesn't contain a (YAML) file input referenced as alert-spec representing the TOSCA Alerts Specification
+        """
+
+        kapacitor_host, kapacitor_port = self.request.registry.settings['kapacitor_host'], self.request.registry.settings['kapacitor_port']
+
+        alert_spec_reference = self.request.POST.get('alert-spec')
+        resource_spec_reference = self.request.POST.get('resource-spec')
+        try:
+            resource_spec_sfc, resource_spec_sfc_i, resource_spec_topic_ids = get_resource_spec_topic_ids(resource_spec_reference)
+        except Exception as e:
+            log.error("Couldn't extract resource specification event IDs due to error: {0}".format(e))
+            raise HTTPBadRequest("Couldn't extract resource specification event IDs - invalid TOSCA resource specification.")
+
+        # check that the specification file was sent
+        if not hasattr(alert_spec_reference, "file") or not hasattr(alert_spec_reference, "filename"):
+            raise HTTPBadRequest("Request to this API endpoint must include a (YAML) file input referenced as 'alert-spec' representing the TOSCA Alerts Specification.")
+
+        # extract alert specification file and filename
+        alerts_input_filename = alert_spec_reference.filename
+        alerts_input_file = alert_spec_reference.file
+
+        if not alerts_input_filename.lower().endswith('.yaml'):
+            raise HTTPBadRequest("Request to this API endpoint must include a (YAML) file input referenced as 'alert-spec' representing the TOSCA Alerts Specification.")
+
+        # parse the alerts specification file
+        try:
+            alerts_yaml_content = load(alerts_input_file)
+            adjust_tosca_definitions_import(alerts_yaml_content)
+        except YAMLError as err:
+            log.error("Couldn't parse user request file {0} to yaml format due to error: {1}".format(alerts_input_filename, err))
+            log.error("Invalid content is: {0}".format(alerts_input_file.read()))
+            raise HTTPBadRequest("Request alert specification file could not be parsed as valid YAML document.")
+
+        try:
+            tosca_tpl = ToscaTemplate(yaml_dict_tpl=alerts_yaml_content)
+        except Exception as e:
+            log.error(e)
+            raise HTTPBadRequest("Request alert specification file could not be parsed as a valid TOSCA document.")
+
+        valid_alert_spec = validate_clmc_alerts_specification(tosca_tpl.tpl)
+        if not valid_alert_spec:
+            raise HTTPBadRequest("Request alert specification file could not be validated as a CLMC TOSCA alerts specification document.")
+
+        alert_spec_topic_ids = get_alert_spec_topic_ids(tosca_tpl)
+        sfc, sfc_instance = tosca_tpl.tpl["metadata"]["sfc"], tosca_tpl.tpl["metadata"]["sfci"]
+
+        # do validation between the two TOSCA documents
+        self._compare_alert_and_resource_spec(sfc, sfc_instance, alert_spec_topic_ids, resource_spec_sfc, resource_spec_sfc_i, resource_spec_topic_ids)
+
+        db = sfc  # ASSUMPTION: database per service function chain, named after the service function chain ID
+        # two lists to keep track of any errors while interacting with the Kapacitor HTTP API
+        alert_tasks_errors = []
+        alert_handlers_errors = []
+
+        # iterate through every policy and extract all triggers of the given policy
+        self._config_kapacitor_alerts(tosca_tpl, sfc, sfc_instance, db, kapacitor_host, kapacitor_port, alert_tasks_errors, alert_handlers_errors)
+
+        return_msg = {"msg": "Alerts specification has been successfully validated and forwarded to Kapacitor", "service_function_chain_id": sfc,
+                      "service_function_chain_instance_id": sfc_instance}
+
+        if len(alert_tasks_errors) > 0:
+            return_msg["triggers_specification_errors"] = alert_tasks_errors
+            self.request.response.status = 400
+
+        if len(alert_handlers_errors) > 0:
+            return_msg["triggers_action_errors"] = alert_handlers_errors
+            self.request.response.status = 400
+
+        return return_msg
+
+    def _compare_alert_and_resource_spec(self, alert_spec_sfc, alert_spec_sfc_instance, alert_spec_topics, resource_spec_sfc, resource_spec_sfc_instance, resource_spec_topics):
+        """
+        Compares the values extracted from the resource spec against those from the alerts spec, validating that the two documents refer to the same service function chain, instance and triggers.
+
+        :param alert_spec_sfc: sfc from alert spec
+        :param alert_spec_sfc_instance: sfc instance from alert spec
+        :param alert_spec_topics: policy/trigger IDs from alert spec
+        :param resource_spec_sfc: sfc from resource spec
+        :param resource_spec_sfc_instance: sfc instance from resource spec
+        :param resource_spec_topics: policy/trigger IDs from resource spec
+
+        :raises: HTTP Bad Request if the two specifications are inconsistent
+        """
+
+        if alert_spec_sfc != resource_spec_sfc:
+            raise HTTPBadRequest("Different service function chain ID used in the alert and resource specification documents: {0} != {1}".format(alert_spec_sfc, resource_spec_sfc))
+
+        if alert_spec_sfc_instance != resource_spec_sfc_instance:
+            raise HTTPBadRequest("Different service function chain instance ID used in the alert and resource specification documents: {0} != {1}".format(alert_spec_sfc_instance, resource_spec_sfc_instance))
+
+        alert_spec_topics_set = set(alert_spec_topics)
+        missing_topic_ids = [topic_id for topic_id in resource_spec_topics if topic_id not in alert_spec_topics_set]
+
+        if len(missing_topic_ids) > 0:
+            missing_topic_ids = [topic_id.replace("\n", " : ") for topic_id in missing_topic_ids]
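+            # the internal newline separator is replaced purely for readability, so that each unmatched entry in the
+            # error message reads, e.g., "<policy ID> : <trigger ID>" (illustrative format)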
+            raise HTTPBadRequest("Couldn't match the following policy triggers from the resource specification with triggers defined in the alerts specification: {0}".format(missing_topic_ids))
+
+    def _config_kapacitor_alerts(self, tosca_tpl, sfc, sfc_instance, db, kapacitor_host, kapacitor_port, alert_tasks_errors, alert_handlers_errors):
+        """
+        Configures the alerts task and alert handlers within Kapacitor.
+
+        :param tosca_tpl: the parsed Tosca template object
+        :param sfc: sfc ID
+        :param sfc_instance: sfc instance ID
+        :param db: Influx database ID
+        :param kapacitor_host: the host name/IP address of the Kapacitor server (typically localhost, since the CLMC service runs on the same machine as Kapacitor)
+        :param kapacitor_port: the port number of the Kapacitor HTTP API (9092 by default)
+        :param alert_tasks_errors: the list for tracking errors while interacting with Kapacitor tasks
+        :param alert_handlers_errors: the list for tracking errors while interacting with Kapacitor alert handlers
+
+        :return: None - any errors encountered while interacting with Kapacitor are appended to the alert_tasks_errors and alert_handlers_errors lists
+        """
+
+        for policy in tosca_tpl.policies:
+            for trigger in policy.triggers:
+                event_id = trigger.name
+                policy_id = policy.name
+
+                event_type = trigger.trigger_tpl["event_type"]
+                template_id = "{0}-template".format(event_type)
+                measurement, field = trigger.trigger_tpl["metric"].split(".")
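+                # e.g. a (hypothetical) metric value of "network.latency" is split into measurement "network" and field "latency"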
+
+                condition = trigger.trigger_tpl["condition"]
+                critical_value = float(condition["threshold"])
+                alert_period = "{0}s".format(condition["granularity"])
+                influx_function = condition.get("aggregation_method", "mean")  # if not specified, use "mean"
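+                # illustrative example (assumed trigger values): a condition of
+                #   {threshold: 45, granularity: 120, aggregation_method: mean}
+                # yields critical_value=45.0, alert_period="120s" and influx_function="mean"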
+
+                # check for tag filtering
+                where_clause = None
+                if "resource_type" in trigger.trigger_tpl["condition"]:
+                    tags = condition["resource_type"]
+                    # make sure alert tasks are executing with queries for the given sfc and sfc instance
+                    tags["flame_sfc"] = sfc
+                    tags["flame_sfci"] = sfc_instance
+
+                    # NOTE: if the template has its where clause defined as lambda (stream templates), then use "==" as comparison operator,
+                    #       else if the template's where clause is defined as a string (batch templates), then use "=" as comparison operator
+                    filter_comparison_operator = TICKScriptTemplateFiller.get_comparison_operator(event_type)  # retrieves the correct comparison operator to use for building the where clause
+
+                    # build up the where clause from the tags dictionary
+                    where_clause = " AND ".join(map(lambda tag_name: '"{0}"{1}\'{2}\''.format(tag_name, filter_comparison_operator, tags[tag_name]), tags))
+
+                comparison_operator = COMPARISON_OPERATORS[condition.get("comparison_operator", "gte")]  # if not specified, use "gte" (>=)
+
+                # generate topic and alert identifiers
+                topic_id = "{0}\n{1}\n{2}\n{3}".format(sfc, sfc_instance, policy_id, event_id)  # scoped per service function chain instance (no two sfc instances report to the same topic)
+                topic_id = self.get_hash(topic_id)
+                alert_id = topic_id
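+                # illustrative example: the raw identifier is the string "<sfc>\n<sfc instance>\n<policy ID>\n<trigger ID>",
+                # and both topic_id and alert_id are set to its SHA-256 hex digest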
+
+                # build up the template vars dictionary depending on the event type (threshold, relative, etc.)
+                # all extracted properties from the trigger are passed, the TICKScriptTemplateFiller entry point then forwards those to the appropriate function for template filling
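+                # NOTE: the dictionary produced below is assumed to follow Kapacitor's template vars format,
+                #       e.g. {"db": {"type": "string", "value": db}, ...} - the exact keys depend on the chosen template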
+                template_vars = TICKScriptTemplateFiller.fill_template_vars(event_type, db=db, measurement=measurement, field=field, influx_function=influx_function,
+                                                                            critical_value=critical_value, comparison_operator=comparison_operator, alert_period=alert_period,
+                                                                            topic_id=topic_id, where_clause=where_clause)
+
+                # create and activate alert task through the kapacitor HTTP API
+                kapacitor_api_tasks_url = "http://{0}:{1}/kapacitor/v1/tasks".format(kapacitor_host, kapacitor_port)
+                kapacitor_http_request_body = {
+                    "id": alert_id,
+                    "template-id": template_id,
+                    "dbrps": [{"db": db, "rp": "autogen"}],
+                    "status": "enabled",
+                    "vars": template_vars
+                }
+
+                # send the request and receive a response
+                response = post(kapacitor_api_tasks_url, json=kapacitor_http_request_body)
+                response_content = response.json()
+                # log the response
+                log.info("Kapacitor task creation response: {0}; status code: {1}".format(response_content, response.status_code))
+
+                # track all reported errors
+                if response_content.get("error", "") != "":
+                    alert_tasks_errors.append({
+                        "policy": policy_id,
+                        "trigger": event_id,
+                        "error": response_content.get("error")
+                    })
+
+                # extract http handlers
+                http_handlers = trigger.trigger_tpl["action"]["implementation"]
+
+                # subscribe all http handlers to the created topic
+                self._config_kapacitor_alert_handlers(kapacitor_host, kapacitor_port, sfc, sfc_instance, policy_id, topic_id, event_id, http_handlers, alert_handlers_errors)
+
+    def _config_kapacitor_alert_handlers(self, kapacitor_host, kapacitor_port, sfc, sfc_i, policy_id, topic_id, event_id, http_handlers, alert_handlers_errors):
+        """
+        Handles the configuration of HTTP Post alert handlers.
+
+        :param kapacitor_host: the host name/IP address of the Kapacitor server (typically localhost, since the CLMC service runs on the same machine as Kapacitor)
+        :param kapacitor_port: the port number of the Kapacitor HTTP API (9092 by default)
+        :param sfc: service function chain identifier
+        :param sfc_i: service function chain instance identifier
+        :param policy_id: the ID of the policy the trigger belongs to
+        :param topic_id: the topic ID derived (hashed) from the sfc, sfc instance, policy and trigger identifiers
+        :param event_id: the name of the trigger
+        :param http_handlers: list of handlers to subscribe
+        :param alert_handlers_errors: the list for tracking errors while interacting with Kapacitor alert handlers
+        """
+
+        kapacitor_api_handlers_url = "http://{0}:{1}/kapacitor/v1/alerts/topics/{2}/handlers".format(kapacitor_host, kapacitor_port, topic_id)
+        for http_handler_url in http_handlers:
+            http_handler_host = urlparse(http_handler_url).hostname
+            handler_id = "{0}\n{1}\n{2}\n{3}\n{4}".format(sfc, sfc_i, policy_id, event_id, http_handler_host)
+            handler_id = self.get_hash(handler_id)
+            kapacitor_http_request_body = fill_http_post_handler_vars(handler_id, http_handler_url)
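+            # the generated request body is assumed to follow Kapacitor's topic handler format,
+            # e.g. {"id": handler_id, "kind": "post", "options": {"url": http_handler_url}}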
+            response = post(kapacitor_api_handlers_url, json=kapacitor_http_request_body)
+            response_content = response.json()
+            log.info("Kapacitor handler subscription response: {0}; status code: {1}".format(response_content, response.status_code))
+
+            if response_content.get("error", "") != "":
+                alert_handlers_errors.append({
+                    "policy": policy_id,
+                    "trigger": event_id,
+                    "handler": http_handler_url,
+                    "error": response_content.get("error")
+                })
+
+    @staticmethod
+    def get_hash(message):
+        """
+        Returns the SHA-256 hash of a message encoded as UTF-8.
+
+        :param message: the message to hash
+
+        :return: the hexadecimal digest of the hash
+        """
+
+        byte_str = bytes(message, encoding="utf-8")
+        hash_obj = sha256(byte_str)
+
+        return hash_obj.hexdigest()
diff --git a/src/service/clmcservice/configapi/__init__.py b/src/service/clmcservice/configapi/__init__.py
deleted file mode 100644
index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/configapi/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/src/service/clmcservice/configapi/conftest.py b/src/service/clmcservice/configapi/conftest.py
deleted file mode 100644
index 105ebe81d84e5bead8e73dd2d069026c58e20ee7..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/configapi/conftest.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/python3
-"""
-// © University of Southampton IT Innovation Centre, 2018
-//
-// Copyright in this software belongs to University of Southampton
-// IT Innovation Centre of Gamma House, Enterprise Road,
-// Chilworth Science Park, Southampton, SO16 7NS, UK.
-//
-// This software may not be used, sold, licensed, transferred, copied
-// or reproduced in whole or in part in any manner or form or in or
-// on any media by any person other than in accordance with the terms
-// of the Licence Agreement supplied with the software, or otherwise
-// without the prior written consent of the copyright owners.
-//
-// This software is distributed WITHOUT ANY WARRANTY, without even the
-// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-// PURPOSE, except where stated in the Licence Agreement supplied with
-// the software.
-//
-//      Created By :            Nikolay Stanchev
-//      Created Date :          03-07-2018
-//      Created for Project :   FLAME
-"""
-
-
-import pytest
-from clmcservice.whoamiapi.conftest import create_test_database, initialise_database, drop_test_database
-
-
-@pytest.fixture(scope='module', autouse=True)
-def testing_db_session():
-    test_database = "configtestdb"
-    create_test_database(test_database)  # create a database used for executing the unit tests
-    db_session, engine = initialise_database(test_database)  # initialise the database with the models and retrieve a db session
-
-    yield db_session  # return the db session if needed in any of the tests
-
-    db_session.remove()  # remove the db session
-    engine.dispose()  # dispose from the engine
-    drop_test_database(test_database)  # remove the test database
diff --git a/src/service/clmcservice/configapi/tests.py b/src/service/clmcservice/configapi/tests.py
deleted file mode 100644
index 39ee0ed900bd66f702960457aee532db404c3a89..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/configapi/tests.py
+++ /dev/null
@@ -1,315 +0,0 @@
-#!/usr/bin/python3
-"""
-// © University of Southampton IT Innovation Centre, 2018
-//
-// Copyright in this software belongs to University of Southampton
-// IT Innovation Centre of Gamma House, Enterprise Road,
-// Chilworth Science Park, Southampton, SO16 7NS, UK.
-//
-// This software may not be used, sold, licensed, transferred, copied
-// or reproduced in whole or in part in any manner or form or in or
-// on any media by any person other than in accordance with the terms
-// of the Licence Agreement supplied with the software, or otherwise
-// without the prior written consent of the copyright owners.
-//
-// This software is distributed WITHOUT ANY WARRANTY, without even the
-// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-// PURPOSE, except where stated in the Licence Agreement supplied with
-// the software.
-//
-//      Created By :            Nikolay Stanchev
-//      Created Date :          02-07-2018
-//      Created for Project :   FLAME
-"""
-
-import pytest
-from json import dumps
-from pyramid import testing
-from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound, HTTPConflict
-from clmcservice.models import ServiceFunctionChain
-from clmcservice.configapi.views import SFCConfigAPI
-
-
-class TestSFCConfigAPI(object):
-    """
-    A pytest-implementation test for the Config API endpoints for service function chains
-    """
-
-    @pytest.fixture(autouse=True)
-    def app_config(self):
-        """
-        A fixture to implement setUp/tearDown functionality for all tests by initializing configuration structure for the web service and db connection
-        """
-
-        self.registry = testing.setUp()
-
-        yield
-
-        testing.tearDown()
-        ServiceFunctionChain.delete_all()  # clear the instances of the model in the test database
-
-    def test_get_all(self):
-        """
-        Tests the GET all method of the config API for service function chains - returns a list of all service function chains from the database.
-        """
-
-        request = testing.DummyRequest()
-        response = SFCConfigAPI(request).get_all()
-        assert response == [], "Initially there mustn't be any service function chains in the database."
-
-        sfc = ServiceFunctionChain(sfc="sfc1", chain={"nginx": ["minio"]})
-        expected_response_data = [sfc.json]
-        ServiceFunctionChain.add(sfc)  # adds the new instance of the model to the database
-
-        request = testing.DummyRequest()
-        response = SFCConfigAPI(request).get_all()
-        assert response == expected_response_data, "Incorrect response data with 1 service function chain."
-
-        sfc = ServiceFunctionChain(sfc="sfc2", chain={"nginx": ["minio"]})
-        expected_response_data.append(sfc.json)
-        ServiceFunctionChain.add(sfc)
-        sfc = ServiceFunctionChain(sfc="sfc3", chain={"nginx": ["minio"]})
-        expected_response_data.append(sfc.json)
-        ServiceFunctionChain.add(sfc)
-
-        request = testing.DummyRequest()
-        response = SFCConfigAPI(request).get_all()
-        assert response == expected_response_data, "Incorrect response data with more than 1 service function chains."
-
-    def test_get_one(self):
-        """
-        Tests the GET one method of the config API for service function chains - returns a service function chain from the database.
-        """
-
-        request = testing.DummyRequest()
-        response = SFCConfigAPI(request).get_all()
-        assert response == [], "Initially there mustn't be any service function chains in the database."
-
-        self._validation_of_url_parameters_test("get_one")
-
-        sfc = ServiceFunctionChain(sfc="sfc1", chain={"nginx": ["minio"]})
-        expected_response_data = sfc.json
-        ServiceFunctionChain.add(sfc)  # adds the new instance of the model to the database
-
-        request = testing.DummyRequest()
-        request.params["sfc"] = "sfc1"
-        response = SFCConfigAPI(request).get_one()
-        assert response == expected_response_data, "Invalid data returned in the response of GET instance"
-
-        request = testing.DummyRequest()
-        request.params["sfc"] = "sfc2"
-        error_raised = False
-        try:
-            SFCConfigAPI(request).get_one()
-        except HTTPNotFound:
-            error_raised = True
-        assert error_raised, "Not found error must be raised in case of a non existing service function chain"
-
-    def test_post(self):
-        """
-        Tests the POST method of the config API for service function chains - creates a service function chain in the database.
-        """
-
-        request = testing.DummyRequest()
-        response = SFCConfigAPI(request).get_all()
-        assert response == [], "Initially there mustn't be any service function chains in the database."
-
-        resource = dict(sfc="sfc1", chain={"nginx": ["minio"]})
-        json_data = dumps(resource)
-        request = testing.DummyRequest()
-        request.body = json_data.encode(request.charset)
-        response = SFCConfigAPI(request).post()
-        assert response == resource, "POST request must return the created resource"
-        assert ServiceFunctionChain.exists("sfc1"), "POST request must have created the resource"
-
-        resource["chain"] = {}
-        json_data = dumps(resource)
-        request = testing.DummyRequest()
-        request.body = json_data.encode(request.charset)
-        error_raised = False
-        try:
-            SFCConfigAPI(request).post()
-        except HTTPConflict:
-            error_raised = True
-        assert error_raised, "An error must be raised when trying to create a resource which breaks the unique constraint"
-
-    @pytest.mark.parametrize("body, valid", [
-        ('{"sfc": "sfc1", "chain":{"nginx":["minio"]}}', True),
-        ('{"sfc": "sfc2", "chain":{}}', True),
-        ('{"sfc": "sfc1", "chain":[]}', False),
-        ('{}', False),
-        ('{"sfc": "sfc3"}', False),
-        ('{"sf": "sfc2", "sf_i": "sfc_i2", "chain":{}', False),
-        ('{invalid json}', False),
-    ])
-    def test_post_body_validation(self, body, valid):
-        """
-        Tests the POST request validation of the body content.
-
-        :param body: The request body to be validated
-        :param valid: True if body is valid, False otherwise
-        """
-
-        request = testing.DummyRequest()
-        request.body = body.encode(request.charset)
-        error_raised = False
-        try:
-            SFCConfigAPI(request).post()
-        except HTTPBadRequest:
-            error_raised = True
-        assert error_raised == (not valid), "An error must be raised in case of an invalid request body"
-
-    def test_put(self):
-        """
-        Tests the PUT method of the Config API for service function chains - overwrites a service function chain from the database.
-        """
-
-        request = testing.DummyRequest()
-        response = SFCConfigAPI(request).get_all()
-        assert response == [], "Initially there mustn't be any service function chains in the database."
-
-        self._validation_of_url_parameters_test("put")
-
-        resource = dict(sfc="sfc1", chain={"nginx": ["minio"]})
-        body = dumps(resource)
-        request = testing.DummyRequest()
-        request.params["sfc"] = "sfc1"
-        request.body = body.encode(request.charset)
-        error_raised = False
-        try:
-            SFCConfigAPI(request).put()
-        except HTTPNotFound:
-            error_raised = True
-        assert error_raised, "Not found error must be raised in case of a non existing service function chain"
-
-        sfc = ServiceFunctionChain(sfc="sfc1", chain={"nginx": ["minio"]})
-        ServiceFunctionChain.add(sfc)  # adds the new instance of the model to the database
-
-        resource = dict(sfc="sfc1", chain={})
-        body = dumps(resource)
-        request = testing.DummyRequest()
-        request.params["sfc"] = "sfc1"
-        request.body = body.encode(request.charset)
-        response = SFCConfigAPI(request).put()
-        assert response == resource, "PUT request must return the updated resource"
-        assert ServiceFunctionChain.get("sfc1").json["chain"] == {}
-
-        resource = dict(sfc="sfc2", chain={"nginx": ["minio"]})
-        body = dumps(resource)
-        request = testing.DummyRequest()
-        request.params["sfc"] = "sfc1"
-        request.body = body.encode(request.charset)
-        response = SFCConfigAPI(request).put()
-        assert response == resource, "PUT request must return the updated resource"
-        assert not ServiceFunctionChain.exists("sfc1"), "Resource has not been updated"
-        assert ServiceFunctionChain.exists("sfc2"), "Resource has not been updated"
-
-        sfc = ServiceFunctionChain(sfc="sfc1", chain={"nginx": ["minio"]})
-        ServiceFunctionChain.add(sfc)  # adds the new instance of the model to the database
-
-        resource = dict(sfc="sfc2", chain={"nginx": ["minio"]})
-        body = dumps(resource)
-        request = testing.DummyRequest()
-        request.params["sfc"] = "sfc1"
-        request.body = body.encode(request.charset)
-        error_raised = False
-        try:
-            SFCConfigAPI(request).put()
-        except HTTPConflict:
-            error_raised = True
-        assert error_raised, "PUT request breaks unique constraint"
-
-    @pytest.mark.parametrize("body, valid", [
-        ('{"sfc": "sfc1", "chain":{"nginx":["minio"]}}', True),
-        ('{"sfc": "sfc2", "chain":{}}', True),
-        ('{"sfc": "sfc1", "chain":[]}', False),
-        ('{}', False),
-        ('{"sfc": "sfc3"}', False),
-        ('{"sf": "sfc2", "sf_i": "sfc_i2", "chain":{}', False),
-        ('{invalid json}', False),
-    ])
-    def test_put_body_validation(self, body, valid):
-        """
-        Tests the PUT request validation of the body content.
-
-        :param body: The request body to be validated
-        :param valid: True if body is valid, False otherwise
-        """
-
-        sfc = ServiceFunctionChain(sfc="sfc1", chain={"nginx": ["minio"]})
-        ServiceFunctionChain.add(sfc)  # adds the new instance of the model to the database
-
-        request = testing.DummyRequest()
-        request.params["sfc"] = "sfc1"
-        request.body = body.encode(request.charset)
-        error_raised = False
-        try:
-            SFCConfigAPI(request).put()
-        except HTTPBadRequest:
-            error_raised = True
-        assert error_raised == (not valid), "An error must be raised in case of an invalid request body"
-
-    def test_delete(self):
-        """
-        Tests the DELETE method of the config API for service function chains - deletes a service function chain from the database.
-        """
-
-        request = testing.DummyRequest()
-        response = SFCConfigAPI(request).get_all()
-        assert response == [], "Initially there mustn't be any service function chains in the database."
-
-        self._validation_of_url_parameters_test("delete")
-
-        sfc = ServiceFunctionChain(sfc="sfc1", chain={"nginx": ["minio"]})
-        to_delete = sfc.json
-        ServiceFunctionChain.add(sfc)  # adds the new instance of the model to the database
-
-        assert ServiceFunctionChain.exists("sfc1")
-
-        request = testing.DummyRequest()
-        request.params["sfc"] = "sfc1"
-        response = SFCConfigAPI(request).delete()
-        assert response == to_delete, "DELETE must return the deleted object if successful"
-
-        assert not ServiceFunctionChain.exists("sfc1"), "Resource must be deleted after the delete API method has been called."
-
-        request = testing.DummyRequest()
-        request.params["sfc"] = "sfc1"
-        error_raised = False
-        try:
-            SFCConfigAPI(request).delete()
-        except HTTPNotFound:
-            error_raised = True
-        assert error_raised, "Not found error must be raised in case of a non existing service function chain"
-
-    @staticmethod
-    def _validation_of_url_parameters_test(method):
-        """
-        Validates the way a config API method handles url query parameters for service function chains
-
-        :param method: the method to test
-        """
-
-        request = testing.DummyRequest()
-        error_raised = False
-        try:
-            getattr(SFCConfigAPI(request), method).__call__()
-        except HTTPBadRequest:
-            error_raised = True
-        assert error_raised, "Error must be raised in case of no URL parameters"
-
-        request = testing.DummyRequest()
-        request.params["sfc_i"] = "sfc1"  # argument should be sfc
-        try:
-            getattr(SFCConfigAPI(request), method).__call__()
-        except HTTPBadRequest:
-            error_raised = True
-        assert error_raised, "Error must be raised in case of insufficient number of arguments"
-
-        request = testing.DummyRequest()
-        request.params["sf"] = "sfc1"  # argument should be sfc
-        try:
-            getattr(SFCConfigAPI(request), method).__call__()
-        except HTTPBadRequest:
-            error_raised = True
-        assert error_raised, "Error must be raised in case of invalid naming of arguments"
diff --git a/src/service/clmcservice/configapi/utilities.py b/src/service/clmcservice/configapi/utilities.py
deleted file mode 100644
index 0605c0599c5b1a9224001cfa603f3d7fd9a01c9b..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/configapi/utilities.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/python3
-"""
-// © University of Southampton IT Innovation Centre, 2018
-//
-// Copyright in this software belongs to University of Southampton
-// IT Innovation Centre of Gamma House, Enterprise Road,
-// Chilworth Science Park, Southampton, SO16 7NS, UK.
-//
-// This software may not be used, sold, licensed, transferred, copied
-// or reproduced in whole or in part in any manner or form or in or
-// on any media by any person other than in accordance with the terms
-// of the Licence Agreement supplied with the software, or otherwise
-// without the prior written consent of the copyright owners.
-//
-// This software is distributed WITHOUT ANY WARRANTY, without even the
-// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-// PURPOSE, except where stated in the Licence Agreement supplied with
-// the software.
-//
-//      Created By :            Nikolay Stanchev
-//      Created Date :          02-07-2018
-//      Created for Project :   FLAME
-"""
-
-from json import loads
-from clmcservice.models import ServiceFunctionChain
-
-
-def validate_sfchain_body(body):
-    """
-    Validates the request body used to create an service function chain resource in the database.
-
-    :param body: the request body to validate
-    :return the validated sfc dictionary object
-    :raise AssertionError: if the body is not a valid service function chain
-    """
-
-    try:
-        body = loads(body)
-    except:
-        raise AssertionError("Service function chain must be represented by a JSON object.")
-
-    assert len(body) == len(ServiceFunctionChain.__table__.columns), "Service function chain JSON object mustn't contain a different number of attributes than the number of required ones."
-
-    # validate that all required attributes are given in the body
-    for attribute in ServiceFunctionChain.__table__.columns:
-        assert attribute.name in body, "Required attribute not found in the request content."
-
-    assert type(body["chain"]) == dict, "The chain attribute of a service function chain must be a graph representing the relations between service functions."
-
-    for sf in body["chain"]:
-        assert type(body["chain"][sf]) == list, "A list must be used to represent each dependency between service functions"
-
-    return body
diff --git a/src/service/clmcservice/configapi/views.py b/src/service/clmcservice/configapi/views.py
deleted file mode 100644
index 4cfdb69b6e842aa460c4a330961c339acd7421c0..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/configapi/views.py
+++ /dev/null
@@ -1,185 +0,0 @@
-#!/usr/bin/python3
-"""
-// © University of Southampton IT Innovation Centre, 2018
-//
-// Copyright in this software belongs to University of Southampton
-// IT Innovation Centre of Gamma House, Enterprise Road,
-// Chilworth Science Park, Southampton, SO16 7NS, UK.
-//
-// This software may not be used, sold, licensed, transferred, copied
-// or reproduced in whole or in part in any manner or form or in or
-// on any media by any person other than in accordance with the terms
-// of the Licence Agreement supplied with the software, or otherwise
-// without the prior written consent of the copyright owners.
-//
-// This software is distributed WITHOUT ANY WARRANTY, without even the
-// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-// PURPOSE, except where stated in the Licence Agreement supplied with
-// the software.
-//
-//      Created By :            Nikolay Stanchev
-//      Created Date :          02-07-2018
-//      Created for Project :   FLAME
-"""
-
-
-from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPNotFound
-from pyramid.view import view_defaults, view_config
-from clmcservice.models import ServiceFunctionChain
-from clmcservice.configapi.utilities import validate_sfchain_body
-
-
-@view_defaults(renderer='json')
-class SFCConfigAPI(object):
-    """
-    A class-based view for posting and retrieving configuration data for service function chains to the CLMC service
-    """
-
-    def __init__(self, request):
-        """
-        Initialises the instance of the view with the request argument.
-
-        :param request: client's call request
-        """
-
-        self.request = request
-
-    @view_config(route_name='config_sfc', request_method='GET')
-    def get_all(self):
-        """
-        GET API call for all resources.
-
-        :return: A list of all service function chains found in the database.
-        """
-
-        return [instance.json for instance in ServiceFunctionChain.query()]
-
-    @view_config(route_name='config_sfc_instance', request_method='GET')
-    def get_one(self):
-        """
-        GET API call for a single resources.
-
-        :return: One service function chain instance retrieved from the database by querying the sfc ID
-        :raises HTTPBadRequest: if the request parameters are invalid(invalid url query string)
-        :raises HTTPNotFound: if a resource with the given parameters doesn't exist in the database
-        """
-
-        sf_chain = self._get_sf_chain_from_url_string()
-        if sf_chain is None:
-            raise HTTPNotFound("A service function chain with the given parameters doesn't exist.")
-        else:
-            return sf_chain.json
-
-    @view_config(route_name='config_sfc', request_method='POST')
-    def post(self):
-        """
-        A POST API call to create a new service function chain.
-
-        :return: A JSON response to the POST call - essentially with the data of the new resource
-        :raises HTTPBadRequest: if request body is not a valid JSON for the service function chain
-        :raises HTTPConflict: if the unique constraints are not preserved after the creation of a new instance
-        """
-
-        # create an instance of the model and add it to the database table
-        sf_chain = self._validate_and_create()
-        json_data = sf_chain.json
-        ServiceFunctionChain.add(sf_chain)
-
-        self.request.response.status = 201
-
-        return json_data
-
-    @view_config(route_name='config_sfc_instance', request_method='PUT')
-    def put(self):
-        """
-        A PUT API call to update a service function chain.
-
-        :return: A JSON response representing the updated object
-        :raises HTTPBadRequest: if the request parameters are invalid(invalid url query string)
-        :raises HTTPNotFound: if a resource with the given parameters doesn't exist in the database
-        """
-
-        sf_chain = self._get_sf_chain_from_url_string()
-        if sf_chain is None:
-            raise HTTPNotFound("A service function chain with the given ID doesn't exist.")
-        else:
-            try:
-                body = self.request.body.decode(self.request.charset)
-                validated_body = validate_sfchain_body(body)  # validate the content and receive a json dictionary object
-            except AssertionError as e:
-                raise HTTPBadRequest("Bad request content. Service function chain format is incorrect: {0}".format(e.args))
-
-            new_resource = validated_body
-            old_resource = sf_chain.json
-            updating = new_resource["sfc"] == old_resource["sfc"]
-
-            if updating:
-                ServiceFunctionChain.delete(sf_chain)
-                new_sf_chain = ServiceFunctionChain(**validated_body)
-                ServiceFunctionChain.add(new_sf_chain)
-            else:
-                resource_exists = ServiceFunctionChain.exists(new_resource["sfc"])
-                if resource_exists:
-                    raise HTTPConflict("Service function chain with this data already exists.")  # error 409 in case of resource conflict
-
-                new_sf_chain = ServiceFunctionChain(**validated_body)
-                ServiceFunctionChain.replace(sf_chain, new_sf_chain)
-
-            return validated_body
-
-    @view_config(route_name='config_sfc_instance', request_method='DELETE')
-    def delete(self):
-        """
-        Deletes an instance of a service function chain in the database.
-
-        :return: An content of the object that has been deleted
-        :raises HTTPBadRequest: if the request parameters are invalid(invalid url query string)
-        :raises HTTPNotFound: if a resource with the given parameters doesn't exist in the database
-        """
-
-        sf_chain = self._get_sf_chain_from_url_string()
-        if sf_chain is None:
-            raise HTTPNotFound("A service function chain with the given ID doesn't exist.")
-        else:
-            deleted = sf_chain.json
-            ServiceFunctionChain.delete(sf_chain)
-            return deleted
-
-    def _get_sf_chain_from_url_string(self):
-        """
-        Retrieves a service function chain from the database by validating and then using the request url parameters.
-
-        :return: An instance of a service function chain or None if not existing
-        """
-
-        if "sfc" not in self.request.params:
-            raise HTTPBadRequest("Request format is incorrect: URL argument 'sfc' not found")
-
-        sf_chain = ServiceFunctionChain.get(sfc=self.request.params["sfc"])
-        return sf_chain
-
-    def _validate_and_create(self):
-        """
-        Validates the request body and checks if a resource with the given attributes already exists.
-
-        :return: a new instance of the model, if the resource doesn't exist
-        :raises HTTPBadRequest: if request body is not a valid JSON for the service function chain
-        :raises HTTPConflict: if the unique constraints are not preserved after the creation of a new instance
-        """
-
-        try:
-            body = self.request.body.decode(self.request.charset)
-            validated_body = validate_sfchain_body(body)  # validate the content and receive a json dictionary object
-        except AssertionError as e:
-            raise HTTPBadRequest("Bad request content. Service function chain format is incorrect: {0}".format(e.args))
-
-        resource = validated_body
-
-        resource_exists = ServiceFunctionChain.exists(resource["sfc"])
-        if resource_exists:
-            raise HTTPConflict("Service function chain with this data already exists.")  # error 409 in case of resource conflict
-
-        # create an instance of the model
-        sf_chain = ServiceFunctionChain(**resource)
-
-        return sf_chain
diff --git a/src/service/clmcservice/graphapi/conftest.py b/src/service/clmcservice/graphapi/conftest.py
index e36ac06f72af6e96dc17dcb2ec5cee5efb611aee..03e3f6d72df618f05cdeb3578c1fa155a1a090eb 100644
--- a/src/service/clmcservice/graphapi/conftest.py
+++ b/src/service/clmcservice/graphapi/conftest.py
@@ -130,7 +130,10 @@ def db_testing_data():
 
     global network_config
 
-    test_db_name = "TestInfluxDB"
+    test_sfc_name = "test_sfc"
+    test_sfc_instance_1_name = "test_sfc_premium"
+    test_sfc_instance_2_name = "test_sfc_non_premium"
+    test_db_name = test_sfc_name
 
     # ASSUMES both Influx and Neo4j are running on localhost with default ports
     influx = InfluxDBClient(host="localhost", port=8086, timeout=10)
@@ -155,65 +158,65 @@ def db_testing_data():
 
     # nginx data to report to influx
     data = [
-        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 5, 20, 1500, 15000, 1528385860),
-        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 8, 35, 1000, 11000, 1528385860),
-        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 7, 15, 2300, 10000, 1528389860),
-        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 10, 23, 98000, 1200, 1528389860),
-        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 12, 17, 2000, 7500, 1528395860),
-        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 15, 11, 1300, 6700, 1528395860),
-        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 17, 23, 3000, 8300, 1528485860),
-        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 19, 24, 76000, 1200, 1528485860),
-        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 11, 16, 2500, 7500, 1528545860),
-        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 20, 18, 1700, 12000, 1528545860)
+        ("nginx_1_ep1", "DC4", "nginx", "nginx_1", test_sfc_name, test_sfc_instance_1_name, 5, 20, 1500, 15000, 1528385860),
+        ("nginx_1_ep2", "DC6", "nginx", "nginx_1", test_sfc_name, test_sfc_instance_1_name, 8, 35, 1000, 11000, 1528385860),
+        ("nginx_1_ep1", "DC4", "nginx", "nginx_1", test_sfc_name, test_sfc_instance_1_name, 7, 15, 2300, 10000, 1528389860),
+        ("nginx_1_ep2", "DC6", "nginx", "nginx_1", test_sfc_name, test_sfc_instance_1_name, 10, 23, 98000, 1200, 1528389860),
+        ("nginx_1_ep1", "DC4", "nginx", "nginx_1", test_sfc_name, test_sfc_instance_1_name, 12, 17, 2000, 7500, 1528395860),
+        ("nginx_1_ep2", "DC6", "nginx", "nginx_1", test_sfc_name, test_sfc_instance_1_name, 15, 11, 1300, 6700, 1528395860),
+        ("nginx_1_ep1", "DC4", "nginx", "nginx_1", test_sfc_name, test_sfc_instance_1_name, 17, 23, 3000, 8300, 1528485860),
+        ("nginx_1_ep2", "DC6", "nginx", "nginx_1", test_sfc_name, test_sfc_instance_1_name, 19, 24, 76000, 1200, 1528485860),
+        ("nginx_1_ep1", "DC4", "nginx", "nginx_1", test_sfc_name, test_sfc_instance_1_name, 11, 16, 2500, 7500, 1528545860),
+        ("nginx_1_ep2", "DC6", "nginx", "nginx_1", test_sfc_name, test_sfc_instance_1_name, 20, 18, 1700, 12000, 1528545860)
     ]
     influx.write_points([
         {"measurement": "nginx",
-         "tags": {"host": host, "ipendpoint": endpoint, "location": location, "sf": sf, "sf_i": sf_i, "sfc": sfc, "sfc_i": sfc_i, "sr": sr},
+         "tags": {"flame_server": location, "flame_sfe": sfe, "flame_location": location, "flame_sfp": sfp, "flame_sf": sf, "flame_sfc": sfc, "flame_sfci": sfci},
          "fields": {"requests": num_requests, "avg_processing_time": processing_time, "avg_request_size": request_size, "avg_response_size": response_size},
          "time": timestamp * 10 ** 9
-         } for host, endpoint, location, sf, sf_i, sfc, sfc_i, sr, num_requests, processing_time, request_size, response_size, timestamp in data
+         } for sfe, location, sfp, sf, sfc, sfci, num_requests, processing_time, request_size, response_size, timestamp in data
     ])
 
     # minio data to report to influx
     data = [
-        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 12, 86, 101000, 4700, 1528386860),
-        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 15, 75, 96000, 6300, 1528386860),
-        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 7, 105, 5200, 89200, 1528388860),
-        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 12, 60, 76900, 2100, 1528388860),
-        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 11, 121, 99500, 3500, 1528410860),
-        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 12, 154, 2700, 111000, 1528410860),
-        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 14, 84, 1100, 4300, 1528412860),
-        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 5, 45, 1200, 3200, 1528412860),
-        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 7, 63, 87000, 2000, 1528414860),
-        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 16, 86, 3100, 94000, 1528414860)
+        ("minio_1_ep1", "DC4", "minio", "minio_1", test_sfc_name, test_sfc_instance_1_name, 12, 86, 101000, 4700, 1528386860),
+        ("minio_2_ep1", "DC5", "minio", "minio_2", test_sfc_name, test_sfc_instance_2_name, 15, 75, 96000, 6300, 1528386860),
+        ("minio_1_ep1", "DC4", "minio", "minio_1", test_sfc_name, test_sfc_instance_1_name, 7, 105, 5200, 89200, 1528388860),
+        ("minio_2_ep1", "DC5", "minio", "minio_2", test_sfc_name, test_sfc_instance_2_name, 12, 60, 76900, 2100, 1528388860),
+        ("minio_1_ep1", "DC4", "minio", "minio_1", test_sfc_name, test_sfc_instance_1_name, 11, 121, 99500, 3500, 1528410860),
+        ("minio_2_ep1", "DC5", "minio", "minio_2", test_sfc_name, test_sfc_instance_2_name, 12, 154, 2700, 111000, 1528410860),
+        ("minio_1_ep1", "DC4", "minio", "minio_1", test_sfc_name, test_sfc_instance_1_name, 14, 84, 1100, 4300, 1528412860),
+        ("minio_2_ep1", "DC5", "minio", "minio_2", test_sfc_name, test_sfc_instance_2_name, 5, 45, 1200, 3200, 1528412860),
+        ("minio_1_ep1", "DC4", "minio", "minio_1", test_sfc_name, test_sfc_instance_1_name, 7, 63, 87000, 2000, 1528414860),
+        ("minio_2_ep1", "DC5", "minio", "minio_2", test_sfc_name, test_sfc_instance_2_name, 16, 86, 3100, 94000, 1528414860)
     ]
     influx.write_points([
         {"measurement": "minio_http",
-         "tags": {"host": host, "ipendpoint": endpoint, "location": location, "sf": sf, "sf_i": sf_i, "sfc": sfc, "sfc_i": sfc_i, "sr": sr},
+         "tags": {"flame_server": location, "flame_sfe": sfe, "flame_location": location, "flame_sfp": sfp, "flame_sf": sf, "flame_sfc": sfc, "flame_sfci": sfci},
          "fields": {"total_requests_count": num_requests, "total_processing_time": processing_time, "total_requests_size": request_size, "total_response_size": response_size},
          "time": timestamp * 10 ** 9
-         } for host, endpoint, location, sf, sf_i, sfc, sfc_i, sr, num_requests, processing_time, request_size, response_size, timestamp in data
+         } for sfe, location, sfp, sf, sfc, sfci, num_requests, processing_time, request_size, response_size, timestamp in data
     ])
 
     # apache data to report to influx
     data = [
-        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 15, 1400, 15600, 1528386860),
-        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 17, 2200, 11200, 1528388860),
-        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 19, 700, 5700, 1528410860),
-        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 24, 1900, 4300, 1528412860),
-        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 13, 1200, 2500, 1528414860),
+        ("apache_1_ep1", "DC5", "apache", "apache_1", test_sfc_name, test_sfc_instance_2_name, 15, 1400, 15600, 1528386860),
+        ("apache_1_ep1", "DC5", "apache", "apache_1", test_sfc_name, test_sfc_instance_2_name, 17, 2200, 11200, 1528388860),
+        ("apache_1_ep1", "DC5", "apache", "apache_1", test_sfc_name, test_sfc_instance_2_name, 19, 700, 5700, 1528410860),
+        ("apache_1_ep1", "DC5", "apache", "apache_1", test_sfc_name, test_sfc_instance_2_name, 24, 1900, 4300, 1528412860),
+        ("apache_1_ep1", "DC5", "apache", "apache_1", test_sfc_name, test_sfc_instance_2_name, 13, 1200, 2500, 1528414860),
     ]
     influx.write_points([
         {"measurement": "apache",
-         "tags": {"host": host, "ipendpoint": endpoint, "location": location, "sf": sf, "sf_i": sf_i, "sfc": sfc, "sfc_i": sfc_i, "sr": sr},
+         "tags": {"flame_server": location, "flame_sfe": sfe, "flame_location": location, "flame_sfp": sfp, "flame_sf": sf, "flame_sfc": sfc, "flame_sfci": sfci},
          "fields": {"avg_processing_time": processing_time, "avg_request_size": request_size, "avg_response_size": response_size},
          "time": timestamp * 10 ** 9
-         } for host, endpoint, location, sf, sf_i, sfc, sfc_i, sr, processing_time, request_size, response_size, timestamp in data
+         } for sfe, location, sfp, sf, sfc, sfci, processing_time, request_size, response_size, timestamp in data
     ])
 
-    yield from_timestamp, to_timestamp, test_db_name, graph
+    yield from_timestamp, to_timestamp, graph
 
     # clean up after the test is over - delete the test databases and clear up the graph
     influx.drop_database("CLMCMetrics")
-    influx.drop_database("TestInfluxDB")
+    influx.drop_database(test_db_name)
     graph.delete_all()
diff --git a/src/service/clmcservice/graphapi/tests.py b/src/service/clmcservice/graphapi/tests.py
index 9dcd23eaa93819f0743dcab77756969ac53d9fe5..60ee4c1809b165a8166dc64cada16071d704b0e8 100644
--- a/src/service/clmcservice/graphapi/tests.py
+++ b/src/service/clmcservice/graphapi/tests.py
@@ -54,25 +54,21 @@ class TestGraphAPI(object):
     @pytest.mark.parametrize("body, from_timestamp, to_timestamp, error_msg", [
         (None, None, None, "A bad request error must have been raised in case of missing request body."),
         ('{}', 12341412, 1234897, "A bad request error must have been raised in case of invalid request body."),
-        ('{"database": "CLMCMetrics", "retention_policy": "autogen", "service_function_chain_instance": "sfc_i"}', 12341412, 1234897, "A bad request error must have been raised in case of invalid request body."),
-        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": "{invalid_json}"}', 1528386860, 1528389860, "A bad request error must have been raised in case of invalid request body."),
-        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": ["nginx", "minio"]}', 1528386860, 1528389860, "A bad request error must have been raised in case of invalid request body."),
-        ('{"retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
-         1528386860, 1528389860, "A bad request error must have been raised in case of missing database value in the request body"),
-        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_id", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
-         1528386860, 1528389860, "A bad request error must have been raised in case of invalid sfc_i ID in the request body"),
-        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "testsfc1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
-         1528386860, 1528389860, "A bad request error must have been raised in case of invalid sfc_i ID in the request body"),
-        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+        ('"service_function_chain": "sfc", "service_function_chain_instance": "sfci"}', 12341412, 1234897, "A bad request error must have been raised in case of invalid request body."),
+        ('"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": "{invalid_json}"}', 1528386860, 1528389860, "A bad request error must have been raised in case of invalid request body."),
+        ('"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": ["nginx", "minio"]}', 1528386860, 1528389860, "A bad request error must have been raised in case of invalid request body."),
+        ('"service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         1528386860, 1528389860, "A bad request error must have been raised in case of missing service function chain value in the request body"),
+        ('"service_function_chain": "sfc", "service_function_chain_instance": "sfcinstance", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         1528386860, 1528389860, "A bad request error must have been raised in case of invalid sfci ID in the request body"),
+        ('"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
          "not a timestamp", "not a timestamp", "A bad request error must have been raised in case of invalid URL parameters."),
-        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+        ('"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
          None, "not a timestamp", "A bad request error must have been raised in case of invalid URL parameters."),
-        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+        ('"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
          2131212, None, "A bad request error must have been raised in case of invalid URL parameters."),
-        ('{"database": "DB-not-exist", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+        ('"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
          2131212, 2131212, "A bad request error must have been raised in case of a non-existing database."),
-        ('{"database": "TestInfluxDB", "retention_policy": "autogen-invalid", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
-         2131212, 2131212, "A bad request error must have been raised in case of a non-existing retention policy."),
     ])
     def test_build_error_handling(self, body, from_timestamp, to_timestamp, error_msg):
         """
@@ -108,7 +104,7 @@ class TestGraphAPI(object):
 
         global graph_1_id, graph_2_id  # these variables are used to store the ID of the graphs that were created during the execution of this test method; they are reused later when testing the delete method
 
-        from_timestamp, to_timestamp, test_db_name, graph_db = db_testing_data
+        from_timestamp, to_timestamp, graph_db = db_testing_data
 
         dc_nodes = set([node["name"] for node in graph_db.nodes.match("ComputeNode")])
         assert dc_nodes == set("DC" + str(i) for i in range(1, 7)), "Compute nodes must have been created by the db_testing_data fixture"
@@ -120,7 +116,7 @@ class TestGraphAPI(object):
                                         "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"},
                                  apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)",
                                          "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"})
-        body = dumps(dict(database=test_db_name, retention_policy="autogen", service_function_chain_instance="sfc_1", service_functions=service_functions))
+        body = dumps(dict(service_function_chain="sfc", service_function_chain_instance="sfc_1", service_functions=service_functions))
         request = testing.DummyRequest()
         request.params["from_timestamp"] = 12341412
         request.params["to_timestamp"] = 12341412
@@ -137,7 +133,7 @@ class TestGraphAPI(object):
                                         "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"},
                                  minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
                                         "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"})
-        build_json_body = dict(database=test_db_name, retention_policy="autogen", service_function_chain_instance="test_sfc1_1", service_functions=service_functions)
+        build_json_body = dict(service_function_chain="test_sfc", service_function_chain_instance="test_sfc_premium", service_functions=service_functions)
         body = dumps(build_json_body)
         request = testing.DummyRequest()
         request.params["from"] = from_timestamp
@@ -151,18 +147,18 @@ class TestGraphAPI(object):
         graph_1_id = request_id
 
         # check that the appropriate nodes have been created
+        sfp_names = set([node["name"] for node in graph_db.nodes.match("ServiceFunctionPackage")])
+        assert sfp_names == {"nginx", "minio"}, "The graph must contain 2 service function packages - nginx and minio"
         sf_names = set([node["name"] for node in graph_db.nodes.match("ServiceFunction")])
-        assert sf_names == {"nginx", "minio"}, "The graph must contain 2 service functions - nginx and minio"
-        sf_i_names = set([node["name"] for node in graph_db.nodes.match("ServiceFunctionInstance")])
-        assert sf_i_names == {"nginx_1", "minio_1"}, "The graph must contain 2 service function instances - nginx_1 and minio_1"
+        assert sf_names == {"nginx_1", "minio_1"}, "The graph must contain 2 service functions - nginx_1 and minio_1"
         endpoints = set([node["name"] for node in graph_db.nodes.match("Endpoint", uuid=request_id)])
         assert endpoints == {"minio_1_ep1", "nginx_1_ep1", "nginx_1_ep2"}, "The graph must contain 3 endpoints - minio_1_ep1, nginx_1_ep1, nginx_1_ep2"
-        sfc_i_names = set([node["name"] for node in graph_db.nodes.match("ServiceFunctionChainInstance")])
-        assert sfc_i_names == {"test_sfc1_1"}, "The graph must contain 1 service function chain instance - test_sfc1_1"
+        sfci_names = set([node["name"] for node in graph_db.nodes.match("ServiceFunctionChainInstance")])
+        assert sfci_names == {"test_sfc_premium"}, "The graph must contain 1 service function chain instance - test_sfc_premium"
         sfc_names = set([node["name"] for node in graph_db.nodes.match("ServiceFunctionChain")])
-        assert sfc_names == {"test_sfc1"}, "The graph must contain 1 service function chain - test_sfc1"
+        assert sfc_names == {"test_sfc"}, "The graph must contain 1 service function chain - test_sfc"
 
-        reference_node = graph_db.nodes.match("Reference", uuid=request_id, sfc_i="test_sfc1_1", sfc="test_sfc1").first()
+        reference_node = graph_db.nodes.match("Reference", uuid=request_id, sfci="test_sfc_premium", sfc="test_sfc").first()
         assert reference_node is not None and reference_node["from"] == from_timestamp * 10**9 and reference_node["to"] == to_timestamp * 10**9, "Reference node must have been created"
 
         # check the appropriate edges have been created
@@ -171,15 +167,15 @@ class TestGraphAPI(object):
                 ("minio_1_ep1", "Endpoint", "DC4", "ComputeNode", "hostedBy"),
                 ("nginx_1_ep1", "Endpoint", "DC4", "ComputeNode", "hostedBy"),
                 ("nginx_1_ep2", "Endpoint", "DC6", "ComputeNode", "hostedBy"),
-                ("minio_1", "ServiceFunctionInstance", "minio_1_ep1", "Endpoint", "realisedBy"),
-                ("nginx_1", "ServiceFunctionInstance", "nginx_1_ep1", "Endpoint", "realisedBy"),
-                ("nginx_1", "ServiceFunctionInstance", "nginx_1_ep2", "Endpoint", "realisedBy"),
-                ("minio_1", "ServiceFunctionInstance", "minio", "ServiceFunction", "instanceOf"),
-                ("nginx_1", "ServiceFunctionInstance", "test_sfc1_1", "ServiceFunctionChainInstance", "utilizedBy"),
-                ("minio_1", "ServiceFunctionInstance", "test_sfc1_1", "ServiceFunctionChainInstance", "utilizedBy"),
-                ("nginx", "ServiceFunction", "test_sfc1", "ServiceFunctionChain", "utilizedBy"),
-                ("minio", "ServiceFunction", "test_sfc1", "ServiceFunctionChain", "utilizedBy"),
-                ("test_sfc1_1", "ServiceFunctionChainInstance", "test_sfc1", "ServiceFunctionChain", "instanceOf"),
+                ("minio_1", "ServiceFunction", "minio_1_ep1", "Endpoint", "realisedBy"),
+                ("nginx_1", "ServiceFunction", "nginx_1_ep1", "Endpoint", "realisedBy"),
+                ("nginx_1", "ServiceFunction", "nginx_1_ep2", "Endpoint", "realisedBy"),
+                ("minio_1", "ServiceFunction", "minio", "ServiceFunctionPackage", "instanceOf"),
+                ("nginx_1", "ServiceFunction", "test_sfc_premium", "ServiceFunctionChainInstance", "utilizedBy"),
+                ("minio_1", "ServiceFunction", "test_sfc_premium", "ServiceFunctionChainInstance", "utilizedBy"),
+                ("nginx", "ServiceFunctionPackage", "test_sfc", "ServiceFunctionChain", "utilizedBy"),
+                ("minio", "ServiceFunctionPackage", "test_sfc", "ServiceFunctionChain", "utilizedBy"),
+                ("test_sfc_premium", "ServiceFunctionChainInstance", "test_sfc", "ServiceFunctionChain", "instanceOf"),
             ), graph_db, request_id
         )
 
@@ -191,12 +187,12 @@ class TestGraphAPI(object):
             assert endpoint_node["request_size"] == pytest.approx(request_size, 1), "Wrong request size attribute of endpoint node"
             assert endpoint_node["response_size"] == pytest.approx(response_size, 1), "Wrong response size attribute of endpoint node"
 
-        # send a new request for a new service function chain and check the new subgraph has been created
+        # send a new request for a new service function chain instance and check the new subgraph has been created
         service_functions = dict(minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
                                         "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"},
                                  apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)",
                                          "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"})
-        build_json_body = dict(database=test_db_name, retention_policy="autogen", service_function_chain_instance="test_sfc2_1", service_functions=service_functions)
+        build_json_body = dict(service_function_chain="test_sfc", service_function_chain_instance="test_sfc_non_premium", service_functions=service_functions)
         body = dumps(build_json_body)
         request = testing.DummyRequest()
         request.params["from"] = from_timestamp
@@ -210,18 +206,18 @@ class TestGraphAPI(object):
         graph_2_id = request_id
 
         # check the new nodes have been created
-        assert graph_db.nodes.match("ServiceFunction", name="apache").first() is not None, "Service function apache must have been added to the graph"
+        assert graph_db.nodes.match("ServiceFunctionPackage", name="apache").first() is not None, "Service function package apache must have been added to the graph"
 
-        for sf_i in ("apache_1", "minio_2"):
-            assert graph_db.nodes.match("ServiceFunctionInstance", name=sf_i).first() is not None, "Service function instance {0} must have been added to the graph".format(sf_i)
+        for sf in ("apache_1", "minio_2"):
+            assert graph_db.nodes.match("ServiceFunction", name=sf).first() is not None, "Service function {0} must have been added to the graph".format(sf)
 
         for ep in ("minio_2_ep1", "apache_1_ep1"):
             assert graph_db.nodes.match("Endpoint", name=ep, uuid=request_id).first() is not None, "Endpoint {0} must have been added to the graph".format(ep)
 
-        assert graph_db.nodes.match("ServiceFunctionChainInstance", name="test_sfc2_1").first() is not None, "Service function chain instance test_sfc2_1 must have been added to the graph"
-        assert graph_db.nodes.match("ServiceFunctionChain", name="test_sfc2").first() is not None, "Service function chain test_sfc2 must have been added to the graph"
+        assert graph_db.nodes.match("ServiceFunctionChainInstance", name="test_sfc_non_premium").first() is not None, "Service function chain instance test_sfc_non_premium must have been added to the graph"
+        assert graph_db.nodes.match("ServiceFunctionChain", name="test_sfc").first() is not None, "Service function chain test_sfc must have been added to the graph"
 
-        reference_node = graph_db.nodes.match("Reference", uuid=request_id, sfc_i="test_sfc2_1", sfc="test_sfc2").first()
+        reference_node = graph_db.nodes.match("Reference", uuid=request_id, sfci="test_sfc_non_premium", sfc="test_sfc").first()
         assert reference_node is not None and reference_node["from"] == from_timestamp * 10**9 and reference_node["to"] == to_timestamp * 10**9, "Reference node must have been created"
 
         # check the appropriate edges have been created
@@ -229,15 +225,15 @@ class TestGraphAPI(object):
             (
                 ("minio_2_ep1", "Endpoint", "DC5", "ComputeNode", "hostedBy"),
                 ("apache_1_ep1", "Endpoint", "DC5", "ComputeNode", "hostedBy"),
-                ("minio_2", "ServiceFunctionInstance", "minio_2_ep1", "Endpoint", "realisedBy"),
-                ("apache_1", "ServiceFunctionInstance", "apache_1_ep1", "Endpoint", "realisedBy"),
-                ("minio_2", "ServiceFunctionInstance", "minio", "ServiceFunction", "instanceOf"),
-                ("apache_1", "ServiceFunctionInstance", "apache", "ServiceFunction", "instanceOf"),
-                ("minio_2", "ServiceFunctionInstance", "test_sfc2_1", "ServiceFunctionChainInstance", "utilizedBy"),
-                ("apache_1", "ServiceFunctionInstance", "test_sfc2_1", "ServiceFunctionChainInstance", "utilizedBy"),
-                ("minio", "ServiceFunction", "test_sfc2", "ServiceFunctionChain", "utilizedBy"),
-                ("apache", "ServiceFunction", "test_sfc2", "ServiceFunctionChain", "utilizedBy"),
-                ("test_sfc2_1", "ServiceFunctionChainInstance", "test_sfc2", "ServiceFunctionChain", "instanceOf")
+                ("minio_2", "ServiceFunction", "minio_2_ep1", "Endpoint", "realisedBy"),
+                ("apache_1", "ServiceFunction", "apache_1_ep1", "Endpoint", "realisedBy"),
+                ("minio_2", "ServiceFunction", "minio", "ServiceFunctionPackage", "instanceOf"),
+                ("apache_1", "ServiceFunction", "apache", "ServiceFunctionPackage", "instanceOf"),
+                ("minio_2", "ServiceFunction", "test_sfc_non_premium", "ServiceFunctionChainInstance", "utilizedBy"),
+                ("apache_1", "ServiceFunction", "test_sfc_non_premium", "ServiceFunctionChainInstance", "utilizedBy"),
+                ("minio", "ServiceFunctionPackage", "test_sfc", "ServiceFunctionChain", "utilizedBy"),
+                ("apache", "ServiceFunctionPackage", "test_sfc", "ServiceFunctionChain", "utilizedBy"),
+                ("test_sfc_non_premium", "ServiceFunctionChainInstance", "test_sfc", "ServiceFunctionChain", "instanceOf")
             ), graph_db, request_id
         )
 
@@ -258,7 +254,7 @@ class TestGraphAPI(object):
 
         global graph_1_id, graph_2_id
 
-        from_timestamp, to_timestamp, test_db_name, graph_db = db_testing_data
+        from_timestamp, to_timestamp, graph_db = db_testing_data
 
         request = testing.DummyRequest()
         request.matchdict["graph_id"] = "invalid_graph_id"
@@ -283,8 +279,10 @@ class TestGraphAPI(object):
 
         assert len(graph_db.nodes.match("Endpoint")) == 0, "All endpoint nodes should have been deleted"
         assert set([node["name"] for node in graph_db.nodes.match("ComputeNode")]) == set(["DC" + str(i) for i in range(1, 7)]), "Compute nodes must not be deleted"
-        assert set([node["name"] for node in graph_db.nodes.match("ServiceFunctionInstance")]) == {"nginx_1", "apache_1", "minio_1", "minio_2"}, "Service function instances must not be deleted."
-        assert set([node["name"] for node in graph_db.nodes.match("ServiceFunction")]) == {"nginx", "minio", "apache"}, "Service functions must not be deleted"
+        assert set([node["name"] for node in graph_db.nodes.match("ServiceFunction")]) == {"nginx_1", "apache_1", "minio_1", "minio_2"}, "Service functions must not be deleted."
+        assert set([node["name"] for node in graph_db.nodes.match("ServiceFunctionPackage")]) == {"nginx", "minio", "apache"}, "Service function packages must not be deleted"
+        assert set([node["name"] for node in graph_db.nodes.match("ServiceFunctionChainInstance")]) == {"test_sfc_premium", "test_sfc_non_premium"}, "Service function chain instances must not be deleted"
+        assert set([node["name"] for node in graph_db.nodes.match("ServiceFunctionChain")]) == {"test_sfc"}, "Service function chains must not be deleted"
 
     @pytest.mark.parametrize("graph_id, endpoint, compute_node, error_type, error_msg", [
         ('e8cd4768-47dd-48cd-9c74-7f8926ddbad8', None, None, HTTPBadRequest, "HTTP Bad Request must be thrown in case of missing or invalid url parameters"),
@@ -324,14 +322,14 @@ class TestGraphAPI(object):
         :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data, test database name and the graph db client object (this is a fixture from conftest)
         """
 
-        from_timestamp, to_timestamp, test_db_name, graph_db = db_testing_data
+        from_timestamp, to_timestamp, graph_db = db_testing_data
 
         # create a graph to use for RTT test by using the build API endpoint
         service_functions = dict(nginx={"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)",
                                         "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"},
                                  minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
                                         "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"})
-        build_json_body = dict(database=test_db_name, retention_policy="autogen", service_function_chain_instance="test_sfc1_1", service_functions=service_functions)
+        build_json_body = dict(service_function_chain="test_sfc", service_function_chain_instance="test_sfc_premium", service_functions=service_functions)
         body = dumps(build_json_body)
         request = testing.DummyRequest()
         request.params["from"] = from_timestamp
@@ -379,9 +377,9 @@ class TestGraphAPI(object):
 
         # go through the set of input/output (expected) parameters and assert actual results match with expected ones
         for dc, endpoint, forward_latencies, reverse_latencies, response_time, request_size, response_size, rtt, global_tags in (
-            ("DC6", "nginx_1_ep2", [], [], 22.2, 35600, 6420, 22.2, {"location": "DC6", "sr": "sr6", "ipendpoint": "nginx_1_ep2", "host": "host2", "sfc": "test_sfc1", "sfc_i": "test_sfc1_1", "sf": "nginx", "sf_i": "nginx_1"}),
-            ("DC2", "nginx_1_ep2", [11, 15, 4.5], [5.5, 13, 7.5], 22.2, 35600, 6420, 78, {"location": "DC6", "sr": "sr6", "ipendpoint": "nginx_1_ep2", "host": "host2", "sfc": "test_sfc1", "sfc_i": "test_sfc1_1", "sf": "nginx", "sf_i": "nginx_1"}),
-            ("DC3", "nginx_1_ep1", [12.5], [7.5], 18.2, 2260, 9660, 38, {"location": "DC4", "sr": "sr4", "ipendpoint": "nginx_1_ep1", "host": "host1", "sfc": "test_sfc1", "sfc_i": "test_sfc1_1", "sf": "nginx", "sf_i": "nginx_1"})
+            ("DC6", "nginx_1_ep2", [], [], 22.2, 35600, 6420, 22.2, {"flame_location": "DC6", "flame_sfe": "nginx_1_ep2", "flame_server": "DC6", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_premium", "flame_sfp": "nginx", "flame_sf": "nginx_1"}),
+            ("DC2", "nginx_1_ep2", [11, 15, 4.5], [5.5, 13, 7.5], 22.2, 35600, 6420, 78, {"flame_location": "DC6", "flame_sfe": "nginx_1_ep2", "flame_server": "DC6", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_premium", "flame_sfp": "nginx", "flame_sf": "nginx_1"}),
+            ("DC3", "nginx_1_ep1", [12.5], [7.5], 18.2, 2260, 9660, 38, {"flame_location": "DC4", "flame_sfe": "nginx_1_ep1", "flame_server": "DC4", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_premium", "flame_sfp": "nginx", "flame_sf": "nginx_1"})
         ):
             request = testing.DummyRequest()
             request.matchdict["graph_id"] = request_id
@@ -399,7 +397,7 @@ class TestGraphAPI(object):
                                         "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"},
                                  apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)",
                                          "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"})
-        build_json_body = dict(database=test_db_name, retention_policy="autogen", service_function_chain_instance="test_sfc2_1", service_functions=service_functions)
+        build_json_body = dict(service_function_chain="test_sfc", service_function_chain_instance="test_sfc_non_premium", service_functions=service_functions)
         body = dumps(build_json_body)
         request = testing.DummyRequest()
         request.params["from"] = from_timestamp
@@ -413,10 +411,10 @@ class TestGraphAPI(object):
 
         # go through the set of input/output (expected) parameters and assert actual results match with expected ones
         for dc, endpoint, forward_latencies, reverse_latencies, response_time, request_size, response_size, rtt, global_tags in (
-            ("DC5", "apache_1_ep1", [], [], 17.6, 1480, 7860, 17.6, {"location": "DC5", "sr": "sr5", "ipendpoint": "apache_1_ep1", "host": "host5", "sfc": "test_sfc2", "sfc_i": "test_sfc2_1", "sf": "apache", "sf_i": "apache_1"}),
-            ("DC5", "minio_2_ep1", [], [], 7, 2998, 3610, 7, {"location": "DC5", "sr": "sr5", "ipendpoint": "minio_2_ep1", "host": "host4", "sfc": "test_sfc2", "sfc_i": "test_sfc2_1", "sf": "minio", "sf_i": "minio_2"}),
-            ("DC3", "apache_1_ep1", [10, 15], [13, 9], 17.6, 1480, 7860, 64, {"location": "DC5", "sr": "sr5", "ipendpoint": "apache_1_ep1", "host": "host5", "sfc": "test_sfc2", "sfc_i": "test_sfc2_1", "sf": "apache", "sf_i": "apache_1"}),
-            ("DC2", "minio_2_ep1", [11, 15], [13, 7.5], 7, 2998, 3610, 53, {"location": "DC5", "sr": "sr5", "ipendpoint": "minio_2_ep1", "host": "host4", "sfc": "test_sfc2", "sfc_i": "test_sfc2_1", "sf": "minio", "sf_i": "minio_2"})
+            ("DC5", "apache_1_ep1", [], [], 17.6, 1480, 7860, 17.6, {"flame_location": "DC5", "flame_sfe": "apache_1_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "apache", "flame_sf": "apache_1"}),
+            ("DC5", "minio_2_ep1", [], [], 7, 2998, 3610, 7, {"flame_location": "DC5", "flame_sfe": "minio_2_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "minio", "flame_sf": "minio_2"}),
+            ("DC3", "apache_1_ep1", [10, 15], [13, 9], 17.6, 1480, 7860, 64, {"flame_location": "DC5", "flame_sfe": "apache_1_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "apache", "flame_sf": "apache_1"}),
+            ("DC2", "minio_2_ep1", [11, 15], [13, 7.5], 7, 2998, 3610, 53, {"flame_location": "DC5", "flame_sfe": "minio_2_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "minio", "flame_sf": "minio_2"})
         ):
             request = testing.DummyRequest()
             request.matchdict["graph_id"] = request_id
diff --git a/src/service/clmcservice/graphapi/utilities.py b/src/service/clmcservice/graphapi/utilities.py
index 58d1dff2c40c8ec550194d96e887f1ed05d71439..18efc199408c727c9bdbb42c81de68da67214934 100644
--- a/src/service/clmcservice/graphapi/utilities.py
+++ b/src/service/clmcservice/graphapi/utilities.py
@@ -30,10 +30,10 @@ import logging
 GRAPH_ROUND_TRIP_TIME_URL_PARAMS = ("compute_node", "endpoint")
 
 GRAPH_BUILD_URL_PARAMS = ("from", "to")
-GRAPH_BUILD_QUERY_PARAMS = {"database", "retention_policy", "service_function_chain_instance", "service_functions"}
+GRAPH_BUILD_QUERY_PARAMS = {"service_function_chain", "service_function_chain_instance", "service_functions"}
 GRAPH_BUILD_SF_QUERY_PARAMS = {"response_time_field", "request_size_field", "response_size_field", "measurement_name"}
 
-INFLUX_QUERY_TEMPLATE = 'SELECT {0} AS mean_response_time, {1} AS mean_request_size, {2} AS mean_response_size FROM "{3}"."{4}".{5} WHERE sfc_i=\'{6}\' and time>={7} and time<{8} GROUP BY ipendpoint, location, sf_i, host, sr'
+INFLUX_QUERY_TEMPLATE = 'SELECT {0} AS mean_response_time, {1} AS mean_request_size, {2} AS mean_response_size FROM "{3}"."{4}".{5} WHERE "flame_sfc"=\'{6}\' and "flame_sfci"=\'{7}\' and time>={8} and time<{9} GROUP BY "flame_sfe", "flame_location", "flame_sf"'
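+# template placeholders: {0}-{2} are the aggregation expressions for response time, request size and response size; {3}-{5} are the database, retention policy and measurement; {6}-{7} are the flame_sfc and flame_sfci tag values; {8}-{9} are the from/to timestamps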
 
 
 RTT_CYPHER_QUERY_TEMPLATE = """
@@ -57,7 +57,9 @@ def validate_json_queries_body(body):
     Validates the request body containing mappings from service functions to queries to execute.
 
     :param body: the request body to validate
+
     :return the validated json queries dictionary object
+
     :raise AssertionError: if the body is invalid
     """
 
@@ -70,15 +72,16 @@ def validate_json_queries_body(body):
 
     assert GRAPH_BUILD_QUERY_PARAMS == set(body.keys()), "Invalid JSON query document."
 
-    sfc_i = body["service_function_chain_instance"]
-    sfc_i_subparts = sfc_i.split('_')
-    assert len(sfc_i_subparts) > 1, "Incorrect format of service function chain instance ID - use format <sfcID>_<instanceNum>"
-
-    # check the last part of the sfc_i ID is a number
-    try:
-        int(sfc_i_subparts[-1])
-    except ValueError:
-        assert False, "Incorrect format of service function chain instance ID - use format <sfcID>_<instanceNum>"
+    # NOTE: this code is now outdated - the SFC instance identifier no longer depends on the SFC identifier
+    # sfc_i = body["service_function_chain_instance"]
+    # sfc_i_subparts = sfc_i.split('_')
+    # assert len(sfc_i_subparts) > 1, "Incorrect format of service function chain instance ID - use format <sfcID>_<instanceNum>"
+    #
+    # # check the last part of the sfc_i ID is a number
+    # try:
+    #     int(sfc_i_subparts[-1])
+    # except ValueError:
+    #     assert False, "Incorrect format of service function chain instance ID - use format <sfcID>_<instanceNum>"
 
     assert type(body["service_functions"]) == dict, "The service function description should be represented with a dictionary."
 
@@ -193,22 +196,21 @@ def build_temporal_graph(request_id, from_timestamp, to_timestamp, json_queries,
 
     global INFLUX_QUERY_TEMPLATE
 
-    db = json_queries["database"]
-    rp = json_queries["retention_policy"]
-    sfc_i = json_queries["service_function_chain_instance"]
-
-    log.info("Building graph for service function chain {0} from database {1} with retention policy {2}".format(sfc_i, db, rp))
+    sfc = json_queries["service_function_chain"]
+    sfci = json_queries["service_function_chain_instance"]
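+    # the graph is built from the SFC's own measurement database, using the default "autogen" retention policy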
+    db = sfc
+    rp = "autogen"
 
-    sfc = "_".join(sfc_i.split('_')[: -1])  # assumes sfc_i is always in the form <sfc>_<num>
+    log.info("Building graph for service function chain {0}/{1} from database {2} with retention policy {3}".format(sfc, sfci, db, rp))
 
     # create a UUID reference node
-    reference_node = Node("Reference", **{"uuid": request_id, "sfc": sfc, "sfc_i": sfc_i, "from": from_timestamp, "to": to_timestamp})
+    reference_node = Node("Reference", **{"uuid": request_id, "sfc": sfc, "sfci": sfci, "from": from_timestamp, "to": to_timestamp})
     graph.create(reference_node)
 
     # create a node for the service function chain if it doesn't exist
     service_function_chain_node = find_or_create_node(graph, "ServiceFunctionChain", name=sfc)
     # create a node for the service function chain instance if it doesn't exist
-    service_function_chain_instance_node = find_or_create_node(graph, "ServiceFunctionChainInstance", name=sfc_i)
+    service_function_chain_instance_node = find_or_create_node(graph, "ServiceFunctionChainInstance", name=sfci)
     # create a instanceOf edge if it doesn't exist
     find_or_create_edge(graph, "instanceOf", service_function_chain_instance_node, service_function_chain_node)
 
@@ -224,12 +226,12 @@ def build_temporal_graph(request_id, from_timestamp, to_timestamp, json_queries,
         measurement = query_data["measurement_name"]
 
         # build up the query by setting the placeholders in the query template
-        query_to_execute = INFLUX_QUERY_TEMPLATE.format(response_time_field, request_size_field, response_size_field, db, rp, measurement, sfc_i, from_timestamp, to_timestamp)
+        query_to_execute = INFLUX_QUERY_TEMPLATE.format(response_time_field, request_size_field, response_size_field, db, rp, measurement, sfc, sfci, from_timestamp, to_timestamp)
 
         # create a node for the service function if it doesn't exist
-        service_function_node = find_or_create_node(graph, "ServiceFunction", name=service_function)
+        service_function_package_node = find_or_create_node(graph, "ServiceFunctionPackage", name=service_function)
         # crate a utilizedBy edge between the service function and the service function chain
-        find_or_create_edge(graph, "utilizedBy", service_function_node, service_function_chain_node)
+        find_or_create_edge(graph, "utilizedBy", service_function_package_node, service_function_chain_node)
 
         log.info("Executing query: {0}".format(query_to_execute))
         result = influx_client.query(query_to_execute)  # execute the query
@@ -247,26 +249,26 @@ def build_temporal_graph(request_id, from_timestamp, to_timestamp, json_queries,
             request_size = result_point["mean_request_size"]  # extract the avg request size of the SF from the result
             response_size = result_point["mean_response_size"]  # extract the avg response size of the SF from the result
 
-            # create a ServiceFunctionInstance node from the tag value (if it is not already created)
-            service_function_instance_node = find_or_create_node(graph, "ServiceFunctionInstance", name=tags["sf_i"])
-            # create an edge between the instance and the service function (if it is not already created)
-            find_or_create_edge(graph, "instanceOf", service_function_instance_node, service_function_node)
-            # crate a utilizedBy edge between the service function instance and the service function chain instance
-            find_or_create_edge(graph, "utilizedBy", service_function_instance_node, service_function_chain_instance_node)
+            # create a ServiceFunction node from the tag value (if it is not already created)
+            service_function_node = find_or_create_node(graph, "ServiceFunction", name=tags["flame_sf"])
+            # create an edge between the service function and the package (if it is not already created)
+            find_or_create_edge(graph, "instanceOf", service_function_node, service_function_package_node)
+            # create a utilizedBy edge between the service function and the service function chain instance
+            find_or_create_edge(graph, "utilizedBy", service_function_node, service_function_chain_instance_node)
 
             # create an Endpoint node from the tag value (if it is not already created)
-            ipendpoint_node = find_or_create_node(graph, "Endpoint", name=tags["ipendpoint"], response_time=response_time, request_size=request_size, response_size=response_size, uuid=request_id, host=tags["host"], sr=tags["sr"])
-            # create an edge between the instance and the endpoint (if it is not already created)
-            find_or_create_edge(graph, "realisedBy", service_function_instance_node, ipendpoint_node)
+            ipendpoint_node = find_or_create_node(graph, "Endpoint", name=tags["flame_sfe"], response_time=response_time, request_size=request_size, response_size=response_size, uuid=request_id)
+            # create an edge between the service function and the endpoint (if it is not already created)
+            find_or_create_edge(graph, "realisedBy", service_function_node, ipendpoint_node)
 
             # create a ComputeNode node from the tag value (if it is not already created)
-            compute_node = find_or_create_node(graph, "ComputeNode", name=tags["location"])
+            compute_node = find_or_create_node(graph, "ComputeNode", name=tags["flame_location"])
             # create an edge between the endpoint and the compute node (if it is not already created)
             find_or_create_edge(graph, "hostedBy", ipendpoint_node, compute_node)
 
             compute_nodes.add(compute_node)  # add the compute node to the set of compute nodes
 
-    log.info("Finished building graph for service function chain {0} from database {1} with retention policy {2}".format(sfc_i, db, rp))
+    log.info("Finished building graph for service function chain {0} from database {1} with retention policy {2}".format(sfci, db, rp))
 
 
 def delete_temporal_subgraph(graph, subgraph_id):
diff --git a/src/service/clmcservice/graphapi/views.py b/src/service/clmcservice/graphapi/views.py
index 79ff877a8fa47addc0d06f0369a1165b138ccb9b..0223aea5b0d7b54967d10ad91832be8592ad10f4 100644
--- a/src/service/clmcservice/graphapi/views.py
+++ b/src/service/clmcservice/graphapi/views.py
@@ -74,13 +74,9 @@ class GraphAPI(object):
         graph = Graph(host=self.request.registry.settings['neo4j_host'], password=self.request.registry.settings['neo4j_password'])
         influx_client = InfluxDBClient(host=self.request.registry.settings['influx_host'], port=self.request.registry.settings['influx_port'], timeout=10)
 
-        database_name = json_queries["database"]
+        database_name = json_queries["service_function_chain"]
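+        # the measurement database for a service function chain is named after the chain identifier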
         if database_name not in [db["name"] for db in influx_client.get_list_database()]:
-            raise HTTPBadRequest("Database {0} not found.".format(database_name))
-
-        retention_policy = json_queries["retention_policy"]
-        if retention_policy not in [rp["name"] for rp in influx_client.get_list_retention_policies(database_name)]:
-            raise HTTPBadRequest("Retention policy {0} for database {1} not found.".format(retention_policy, database_name))
+            raise HTTPBadRequest("Database for service function chain {0} not found.".format(database_name))
 
         from_timestamp = params['from'] * 10**9
         to_timestamp = params['to'] * 10**9
@@ -156,20 +152,20 @@ class GraphAPI(object):
             data = graph.run(query_to_execute).data()  # returns a list of dictionaries, each dictionary represents a row in the result
             result = data[0]
 
-        sf_i_node = graph.match(nodes=(None, endpoint_node), r_type="realisedBy").first().start_node
-        if sf_i_node is None:
-            msg = "No service function instance found associated with endpoint {0}".format(endpoint_node["name"])
+        sf_node = graph.match(nodes=(None, endpoint_node), r_type="realisedBy").first().start_node
+        if sf_node is None:
+            msg = "No service function found associated with endpoint {0}".format(endpoint_node["name"])
             log.error("Unexpected error: {0}".format(msg))
             raise HTTPBadRequest(msg)
 
-        sf_node = graph.match(nodes=(sf_i_node, None), r_type="instanceOf").first().end_node
-        if sf_node is None:
-            msg = "No service function found associated with service function instance {0}".format(sf_i_node["name"])
+        sf_package_node = graph.match(nodes=(sf_node, None), r_type="instanceOf").first().end_node
+        if sf_package_node is None:
+            msg = "No service function package found associated with service function {0}".format(sf_node["name"])
             log.error("Unexpected error: {0}".format(msg))
             raise HTTPBadRequest(msg)
 
-        result["global_tags"] = {"ipendpoint": endpoint_node["name"], "host": endpoint_node["host"], "location": hosted_by_node["name"], "sr": endpoint_node["sr"],
-                                 "sfc": reference_node["sfc"], "sfc_i": reference_node["sfc_i"], "sf": sf_node["name"], "sf_i": sf_i_node["name"]}
+        result["global_tags"] = {"flame_sfe": endpoint_node["name"], "flame_server": hosted_by_node["name"], "flame_location": hosted_by_node["name"],
+                                 "flame_sfc": reference_node["sfc"], "flame_sfci": reference_node["sfci"], "flame_sfp": sf_package_node["name"], "flame_sf": sf_node["name"]}
 
         # calculate the Round-Trip-Time
         total_forward_latency = sum(result["forward_latencies"])
diff --git a/src/service/clmcservice/models/__init__.py b/src/service/clmcservice/models/__init__.py
index 6ceb55de67a31ce1456af9e100204097b5120309..bdf3774dab46f54179ef4e30aa05c18f2956f9e9 100644
--- a/src/service/clmcservice/models/__init__.py
+++ b/src/service/clmcservice/models/__init__.py
@@ -1,3 +1,2 @@
 from .meta import DBSession
 from .whoami_models import ServiceFunctionEndpoint
-from .config_models import ServiceFunctionChain
diff --git a/src/service/clmcservice/models/config_models.py b/src/service/clmcservice/models/config_models.py
deleted file mode 100644
index 17ced600999db1440c6afc1daaf26f2aab6b6022..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/models/config_models.py
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/python3
-"""
-// © University of Southampton IT Innovation Centre, 2018
-//
-// Copyright in this software belongs to University of Southampton
-// IT Innovation Centre of Gamma House, Enterprise Road,
-// Chilworth Science Park, Southampton, SO16 7NS, UK.
-//
-// This software may not be used, sold, licensed, transferred, copied
-// or reproduced in whole or in part in any manner or form or in or
-// on any media by any person other than in accordance with the terms
-// of the Licence Agreement supplied with the software, or otherwise
-// without the prior written consent of the copyright owners.
-//
-// This software is distributed WITHOUT ANY WARRANTY, without even the
-// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-// PURPOSE, except where stated in the Licence Agreement supplied with
-// the software.
-//
-//      Created By :            Nikolay Stanchev
-//      Created Date :          02-07-2018
-//      Created for Project :   FLAME
-"""
-
-from sqlalchemy import Column, String, and_
-from sqlalchemy.dialects.postgresql import JSONB
-from clmcservice.models.meta import Base
-
-
-class ServiceFunctionChain(Base):
-    """
-    This class defines the service function chain model of the config API, declaring the relations between individual service functions per service function chain.
-    """
-
-    __tablename__ = 'sfchain'  # table name in the PostgreSQL database
-
-    sfc = Column(String, nullable=False, primary_key=True)  # service function chain label
-    chain = Column(JSONB, nullable=False)  # the service function chain graph represented by a python dictionary (JSON object essentially)
-
-    @property
-    def json(self):
-        """
-        Converts an instance of a ServiceFunctionChain to JSON format.
-
-        :return: a python dictionary object
-        """
-
-        fields = {c.name: getattr(self, c.name) for c in self.__table__.columns}
-
-        return fields
-
-    @staticmethod
-    def required_columns():
-        """
-        Returns the required columns for constructing a valid instance.
-        :return: a generator object
-        """
-
-        return tuple(column.name for column in ServiceFunctionChain.__table__.columns)
-
-    @staticmethod
-    def get(sfc):
-        """
-        Gets the instance matching the sfc argument
-
-        :param sfc: service function chain id
-
-        :return: the first object from the result set that matches the sfc argument (must be only one)
-        """
-
-        return ServiceFunctionChain.query().filter(and_(ServiceFunctionChain.sfc == sfc)).first()
-
-    @staticmethod
-    def exists(sfc):
-        """
-        Checks if an instance matching the sfc exists.
-
-        :param sfc: service function chain id
-
-        :return: True if exists, False otherwise
-        """
-
-        return ServiceFunctionChain.get(sfc) is not None
diff --git a/src/service/clmcservice/models/whoami_models.py b/src/service/clmcservice/models/whoami_models.py
index 30466ccfeb4ad77ad7c13a020e1664f1f67f33fe..3fdc01ea7030c777e57aca94facc765eca0a2ab9 100644
--- a/src/service/clmcservice/models/whoami_models.py
+++ b/src/service/clmcservice/models/whoami_models.py
@@ -33,17 +33,15 @@ class ServiceFunctionEndpoint(Base):
 
     __tablename__ = 'sfendpoint'  # table name in the PostgreSQL database
 
-    __table_args__ = (UniqueConstraint('sf_i', 'sf_endpoint', 'sr'),)  # defines a unique constraint across 3 columns - sf_i, sf_endpoint, sr
+    __table_args__ = (UniqueConstraint('sf', 'sf_endpoint'),)  # defines a unique constraint across 2 columns - sf, sf_endpoint
 
-    uid = Column(Integer, primary_key=True, autoincrement=True, nullable=False)  # a primary key integer field (auto incremented)
-
-    location = Column(String, nullable=False)  # cluster label
+    server = Column(String, nullable=False)  # cluster label
+    location = Column(String, nullable=False)  # location label
     sfc = Column(String, nullable=False)  # service function chain label
-    sfc_i = Column(String, nullable=False)  # service function chain instance identifier
-    sf = Column(String, nullable=False)  # service function label
-    sf_i = Column(String, nullable=False)   # service function identifier (potentially FQDN)
-    sf_endpoint = Column(String, nullable=False)  # service function endpoint (potentially IP address)
-    sr = Column(String, nullable=False)  # service router ID - service router that connects the VM to FLAME
+    sfc_instance = Column(String, nullable=False)  # service function chain instance identifier
+    sf_package = Column(String, nullable=False)  # service function package label
+    sf = Column(String, nullable=False)   # service function node defined in the TOSCA resource specification
+    sf_endpoint = Column(String, nullable=False, primary_key=True)  # service function endpoint (FQDN(s) + IP address)
 
     @property
     def json(self):
@@ -54,52 +52,29 @@ class ServiceFunctionEndpoint(Base):
         """
 
         fields = {c.name: getattr(self, c.name) for c in self.__table__.columns}
-        fields.pop("uid")
 
         return fields
 
     @staticmethod
-    def required_columns():
-        """
-        Returns the required columns for constructing a valid instance.
-
-        :return: a generator object
+    def get(sf_endpoint):
         """
+        Gets the service function endpoint object or None if not existing.
 
-        return tuple(column.name for column in ServiceFunctionEndpoint.__table__.columns if column.name != "uid")
-
-    @staticmethod
-    def constrained_columns():
-        """
-        :return: the columns that are uniquely identifying an instance of this model.
-        """
-
-        return tuple(column.name for column in ServiceFunctionEndpoint.__table_args__[0].columns)
-
-    @staticmethod
-    def get(sf_i, sf_endpoint, sr):
-        """
-        Gets the instance matching the unique constraint or None if not existing.
-
-        :param sf_i: service function instance
         :param sf_endpoint: service function endpoint
-        :param sr: service router
 
-        :return: the first object from the result set that matches the unique constraint or None
+        :return: the first object from the result set that matches the unique constraint (should be only one) or None
         """
 
-        return ServiceFunctionEndpoint.query().filter(and_(ServiceFunctionEndpoint.sf_i == sf_i, ServiceFunctionEndpoint.sf_endpoint == sf_endpoint, ServiceFunctionEndpoint.sr == sr)).first()
+        return ServiceFunctionEndpoint.query().filter(ServiceFunctionEndpoint.sf_endpoint == sf_endpoint).first()
 
     @staticmethod
-    def exists(sf_i, sf_endpoint, sr):
+    def exists(sf_endpoint):
         """
-        Checks if an instance matching the unique constraint exists.
+        Checks if an instance matching the sf_endpoint ID exists.
 
-        :param sf_i: service function instance
-        :param sf_endpoint: service function endpoint
-        :param sr: service router
+        :param sf_endpoint: service function endpoint ID to check
 
         :return: True if exists, False otherwise
         """
 
-        return ServiceFunctionEndpoint.get(sf_i, sf_endpoint, sr) is not None
+        return ServiceFunctionEndpoint.get(sf_endpoint) is not None
diff --git a/src/service/clmcservice/static/flame_clmc_alerts_definitions.yaml b/src/service/clmcservice/static/flame_clmc_alerts_definitions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4553746b081258c653d5b90e5d5798ae611b004f
--- /dev/null
+++ b/src/service/clmcservice/static/flame_clmc_alerts_definitions.yaml
@@ -0,0 +1,7 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+policy_types:
+
+  eu.ict-flame.policies.StateChange:
+
+    derived_from: tosca.policies.Update
\ No newline at end of file
diff --git a/src/service/clmcservice/whoamiapi/tests.py b/src/service/clmcservice/whoamiapi/tests.py
index f50886616960d4c6fb8c24425895757057e69525..9c6238c45b6fc8efb32c815d2b62961c06ccc953 100644
--- a/src/service/clmcservice/whoamiapi/tests.py
+++ b/src/service/clmcservice/whoamiapi/tests.py
@@ -57,7 +57,7 @@ class TestWhoamiAPI(object):
         response = WhoamiAPI(request).get_all()
         assert response == [], "Initially there mustn't be any service function endpoint configurations in the database."
 
-        sf_e = ServiceFunctionEndpoint(location="DC1", sfc="sfc1", sfc_i="sfc_i1", sf="sf1", sf_i="sf_i1", sf_endpoint="sf_endpoint1", sr="sr1")
+        sf_e = ServiceFunctionEndpoint(server="DC1", location="DC1", sfc="sfc1", sfc_instance="sfc_i1", sf_package="sf1", sf="sf_i1", sf_endpoint="sf_endpoint1")
         expected_response_data = [sf_e.json]
         ServiceFunctionEndpoint.add(sf_e)  # adds the new instance of the model to the database
 
@@ -65,10 +65,10 @@ class TestWhoamiAPI(object):
         response = WhoamiAPI(request).get_all()
         assert response == expected_response_data, "Incorrect response data with 1 service function endpoint configuration."
 
-        sf_e = ServiceFunctionEndpoint(location="DC2", sfc="sfc2", sfc_i="sfc_i2", sf="sf2", sf_i="sf_i2", sf_endpoint="sf_endpoint2", sr="sr2")
+        sf_e = ServiceFunctionEndpoint(server="DC2", location="DC2", sfc="sfc2", sfc_instance="sfc_i2", sf_package="sf2", sf="sf_i2", sf_endpoint="sf_endpoint2")
         expected_response_data.append(sf_e.json)
         ServiceFunctionEndpoint.add(sf_e)
-        sf_e = ServiceFunctionEndpoint(location="DC3", sfc="sfc3", sfc_i="sfc_i3", sf="sf3", sf_i="sf_i3", sf_endpoint="sf_endpoint3", sr="sr3")
+        sf_e = ServiceFunctionEndpoint(server="DC3", location="DC3", sfc="sfc3", sfc_instance="sfc_i3", sf_package="sf3", sf="sf_i3", sf_endpoint="sf_endpoint3")
         expected_response_data.append(sf_e.json)
         ServiceFunctionEndpoint.add(sf_e)
 
@@ -87,21 +87,17 @@ class TestWhoamiAPI(object):
 
         self._validation_of_url_parameters_test("get_one")
 
-        sf_e = ServiceFunctionEndpoint(location="DC1", sfc="sfc1", sfc_i="sfc_i1", sf="sf1", sf_i="sf_i1", sf_endpoint="sf_endpoint1", sr="sr1")
+        sf_e = ServiceFunctionEndpoint(location="DC1", server="DC1", sfc="sfc1", sfc_instance="sfc_i1", sf_package="sf1", sf="sf_i1", sf_endpoint="sf_endpoint1")
         expected_response_data = sf_e.json
         ServiceFunctionEndpoint.add(sf_e)  # adds the new instance of the model to the database
 
         request = testing.DummyRequest()
         request.params["sf_endpoint"] = "sf_endpoint1"
-        request.params["sf_i"] = "sf_i1"
-        request.params["sr"] = "sr1"
         response = WhoamiAPI(request).get_one()
         assert response == expected_response_data, "Invalid data returned in the response of GET instance"
 
         request = testing.DummyRequest()
         request.params["sf_endpoint"] = "sf_endpoint2"
-        request.params["sf_i"] = "sf_i2"
-        request.params["sr"] = "sr2"
         error_raised = False
         try:
             WhoamiAPI(request).get_one()
@@ -118,13 +114,13 @@ class TestWhoamiAPI(object):
         response = WhoamiAPI(request).get_all()
         assert response == [], "Initially there mustn't be any service function endpoint configurations in the database."
 
-        resource = dict(location="DC1", sfc="sfc1", sfc_i="sfc_i1", sf="sf1", sf_i="sf_i1", sf_endpoint="sf_endpoint1", sr="sr1")
+        resource = dict(server="DC1", location="DC1", sfc="sfc1", sfc_instance="sfc_i1", sf_package="sf1", sf="sf_i1", sf_endpoint="sf_endpoint1")
         json_data = dumps(resource)
         request = testing.DummyRequest()
         request.body = json_data.encode(request.charset)
         response = WhoamiAPI(request).post()
         assert response == resource, "POST request must return the created resource"
-        assert ServiceFunctionEndpoint.exists("sf_i1", "sf_endpoint1", "sr1"), "POST request must have created the resource"
+        assert ServiceFunctionEndpoint.exists("sf_endpoint1"), "POST request must have created the resource"
 
         resource["location"] = "DC2"
         json_data = dumps(resource)
@@ -138,11 +134,11 @@ class TestWhoamiAPI(object):
         assert error_raised, "An error must be raised when trying to create a resource which breaks the unique constraint"
 
     @pytest.mark.parametrize("body, valid", [
-        ('{"location": "DC1", "sfc": "sfc1", "sfc_i": "sfc_i1", "sf": "sf1", "sf_i": "sf_i1", "sf_endpoint": "sf_endpoint1", "sr": "sr1"}', True),
-        ('{"location": "DC2", "sfc": "sfc2", "sfc_i": "sfc_i2", "sf": "sf2", "sf_i": "sf_i2", "sf_endpoint": "sf_endpoint2", "sr": "sr2"}', True),
+        ('{"location": "DC1", "server": "DC1", "sfc": "sfc1", "sfc_instance": "sfc_i1", "sf_package": "sf1", "sf": "sf_i1", "sf_endpoint": "sf_endpoint1"}', True),
+        ('{"location": "DC2", "server": "DC2", "sfc": "sfc2", "sfc_instance": "sfc_i2", "sf_package": "sf2", "sf": "sf_i2", "sf_endpoint": "sf_endpoint2"}', True),
         ('{}', False),
-        ('{"location": "DC1", "sfc": "sfc1", "sfc_i": "sfc_i1", "sf": "sf1", "sf_i": "sf_i1"}', False),
-        ('{"place": "DC2", "sfc": "sfc2", "sfc_i": "sfc_i2", "sf": "sf2", "sf_i": "sf_i2", "sf_endpoint": "sf_endpoint2", "sr": "sr2"}', False),
+        ('{"location": "DC1", "server": "DC1", "sfc": "sfc1", "sfc_instance": "sfc_i1", "sf_package": "sf1", "sf": "sf_i1"}', False),
+        ('{"place": "DC2", "server": "DC2", "sfc": "sfc2", "sfc_instance": "sfc_i2", "sf_package": "sf2", "sf": "sf_i2", "sf_endpoint": "sf_endpoint2"}', False),
         ('{invalid json}', False),
     ])
     def test_post_body_validation(self, body, valid):
@@ -173,12 +169,10 @@ class TestWhoamiAPI(object):
 
         self._validation_of_url_parameters_test("put")
 
-        resource = dict(location="location1", sfc="sfc1", sfc_i="sfc_i1", sf="sf1", sf_i="sf_i1", sf_endpoint="sf_endpoint1", sr="sr1")
+        resource = dict(location="location1", server="location1", sfc="sfc1", sfc_instance="sfc_i1", sf_package="sf1", sf="sf_i1", sf_endpoint="sf_endpoint1")
         body = dumps(resource)
         request = testing.DummyRequest()
         request.params["sf_endpoint"] = "sf_endpoint1"
-        request.params["sf_i"] = "sf_i1"
-        request.params["sr"] = "sr1"
         request.body = body.encode(request.charset)
         error_raised = False
         try:
@@ -187,41 +181,35 @@ class TestWhoamiAPI(object):
             error_raised = True
         assert error_raised, "Not found error must be raised in case of a non existing service function endpoint"
 
-        sf_e = ServiceFunctionEndpoint(location="DC1", sfc="sfc1", sfc_i="sfc_i1", sf="sf1", sf_i="sf_i1", sf_endpoint="sf_endpoint1", sr="sr1")
+        sf_e = ServiceFunctionEndpoint(location="DC1", server="DC1", sfc="sfc1", sfc_instance="sfc_i1", sf_package="sf1", sf="sf_i1", sf_endpoint="sf_endpoint1")
         ServiceFunctionEndpoint.add(sf_e)  # adds the new instance of the model to the database
 
-        resource = dict(location="location1", sfc="sfc1", sfc_i="sfc_i1", sf="sf1", sf_i="sf_i1", sf_endpoint="sf_endpoint1", sr="sr1")
+        resource = dict(location="location1", server="location1", sfc="sfc1", sfc_instance="sfc_i1", sf_package="sf1", sf="sf_i1", sf_endpoint="sf_endpoint1")
         body = dumps(resource)
         request = testing.DummyRequest()
         request.params["sf_endpoint"] = "sf_endpoint1"
-        request.params["sf_i"] = "sf_i1"
-        request.params["sr"] = "sr1"
         request.body = body.encode(request.charset)
         response = WhoamiAPI(request).put()
         assert response == resource, "PUT request must return the updated resource"
-        assert ServiceFunctionEndpoint.get("sf_i1", "sf_endpoint1", "sr1").json["location"] == "location1"
+        assert ServiceFunctionEndpoint.get("sf_endpoint1").json["location"] == "location1"
 
-        resource = dict(location="DC1", sfc="sfc1", sfc_i="sfc_i1", sf="sf1", sf_i="sf_i2", sf_endpoint="sf_endpoint2", sr="sr2")
+        resource = dict(location="DC1", server="DC1", sfc="sfc1", sfc_instance="sfc_i1", sf_package="sf1", sf="sf_i2", sf_endpoint="sf_endpoint2")
         body = dumps(resource)
         request = testing.DummyRequest()
         request.params["sf_endpoint"] = "sf_endpoint1"
-        request.params["sf_i"] = "sf_i1"
-        request.params["sr"] = "sr1"
         request.body = body.encode(request.charset)
         response = WhoamiAPI(request).put()
         assert response == resource, "PUT request must return the updated resource"
-        assert not ServiceFunctionEndpoint.exists("sf_i1", "sf_endpoint1", "sr1"), "Resource has not been updated"
-        assert ServiceFunctionEndpoint.exists("sf_i2", "sf_endpoint2", "sr2"), "Resource has not been updated"
+        assert not ServiceFunctionEndpoint.exists("sf_endpoint1"), "Resource has not been updated"
+        assert ServiceFunctionEndpoint.exists("sf_endpoint2"), "Resource has not been updated"
 
-        sf_e = ServiceFunctionEndpoint(location="DC1", sfc="sfc1", sfc_i="sfc_i1", sf="sf1", sf_i="sf_i1", sf_endpoint="sf_endpoint1", sr="sr1")
+        sf_e = ServiceFunctionEndpoint(location="DC1", server="DC1", sfc="sfc1", sfc_instance="sfc_i1", sf_package="sf1", sf="sf_i1", sf_endpoint="sf_endpoint1")
         ServiceFunctionEndpoint.add(sf_e)  # adds the new instance of the model to the database
 
-        resource = dict(location="DC1", sfc="sfc1", sfc_i="sfc_i1", sf="sf1", sf_i="sf_i2", sf_endpoint="sf_endpoint2", sr="sr2")
+        resource = dict(location="DC1", server="DC1", sfc="sfc1", sfc_instance="sfc_i1", sf_package="sf1", sf="sf_i2", sf_endpoint="sf_endpoint2")
         body = dumps(resource)
         request = testing.DummyRequest()
         request.params["sf_endpoint"] = "sf_endpoint1"
-        request.params["sf_i"] = "sf_i1"
-        request.params["sr"] = "sr1"
         request.body = body.encode(request.charset)
         error_raised = False
         try:
@@ -231,11 +219,11 @@ class TestWhoamiAPI(object):
         assert error_raised, "PUT request breaks unique constraint"
 
     @pytest.mark.parametrize("body, valid", [
-        ('{"location": "DC1", "sfc": "sfc1", "sfc_i": "sfc_i1", "sf": "sf1", "sf_i": "sf_i1", "sf_endpoint": "sf_endpoint1", "sr": "sr1"}', True),
-        ('{"location": "DC2", "sfc": "sfc2", "sfc_i": "sfc_i2", "sf": "sf2", "sf_i": "sf_i2", "sf_endpoint": "sf_endpoint2", "sr": "sr2"}', True),
+        ('{"location": "DC1", "server": "DC1", "sfc": "sfc1", "sfc_instance": "sfc_i1", "sf_package": "sf1", "sf": "sf_i1", "sf_endpoint": "sf_endpoint1"}', True),
+        ('{"location": "DC2", "server": "DC2", "sfc": "sfc2", "sfc_instance": "sfc_i2", "sf_package": "sf2", "sf": "sf_i2", "sf_endpoint": "sf_endpoint2"}', True),
         ('{}', False),
-        ('{"location": "DC1", "sfc": "sfc1", "sfc_i": "sfc_i1", "sf": "sf1", "sf_i": "sf_i1"}', False),
-        ('{"place": "DC2", "sfc": "sfc2", "sfc_i": "sfc_i2", "sf": "sf2", "sf_i": "sf_i2", "sf_endpoint": "sf_endpoint2", "sr": "sr2"}', False),
+        ('{"location": "DC1", "server": "DC1", "sfc": "sfc1", "sfc_instance": "sfc_i1", "sf_package": "sf1", "sf": "sf_i1"}', False),
+        ('{"place": "DC2", "server": "DC2", "sfc": "sfc2", "sfc_instance": "sfc_i2", "sf_package": "sf2", "sf": "sf_i2", "sf_endpoint": "sf_endpoint2"}', False),
         ('{invalid json}', False),
     ])
     def test_put_body_validation(self, body, valid):
@@ -246,20 +234,19 @@ class TestWhoamiAPI(object):
         :param valid: True if body is valid, False otherwise
         """
 
-        sf_e = ServiceFunctionEndpoint(location="DC1", sfc="sfc1", sfc_i="sfc_i1", sf="sf1", sf_i="sf_i1", sf_endpoint="sf_endpoint1", sr="sr1")
+        sf_e = ServiceFunctionEndpoint(location="DC1", server="DC1", sfc="sfc1", sfc_instance="sfc_i1", sf_package="sf1", sf="sf_i1", sf_endpoint="sf_endpoint1")
         ServiceFunctionEndpoint.add(sf_e)  # adds the new instance of the model to the database
 
         request = testing.DummyRequest()
         request.params["sf_endpoint"] = "sf_endpoint1"
-        request.params["sf_i"] = "sf_i1"
-        request.params["sr"] = "sr1"
         request.body = body.encode(request.charset)
         error_raised = False
         try:
             WhoamiAPI(request).put()
         except HTTPBadRequest:
             error_raised = True
-        assert error_raised == (not valid), "An error must be raised in case of an invalid request body"
+
+        assert error_raised == (not valid)
 
     def test_delete(self):
         """
@@ -272,25 +259,21 @@ class TestWhoamiAPI(object):
 
         self._validation_of_url_parameters_test("delete")
 
-        sf_e = ServiceFunctionEndpoint(location="DC1", sfc="sfc1", sfc_i="sfc_i1", sf="sf1", sf_i="sf_i1", sf_endpoint="sf_endpoint1", sr="sr1")
+        sf_e = ServiceFunctionEndpoint(location="DC1", server="DC1", sfc="sfc1", sfc_instance="sfc_i1", sf_package="sf1", sf="sf_i1", sf_endpoint="sf_endpoint1")
         to_delete = sf_e.json
         ServiceFunctionEndpoint.add(sf_e)  # adds the new instance of the model to the database
 
-        assert ServiceFunctionEndpoint.exists("sf_i1", "sf_endpoint1", "sr1")
+        assert ServiceFunctionEndpoint.exists("sf_endpoint1")
 
         request = testing.DummyRequest()
         request.params["sf_endpoint"] = "sf_endpoint1"
-        request.params["sf_i"] = "sf_i1"
-        request.params["sr"] = "sr1"
         response = WhoamiAPI(request).delete()
         assert response == to_delete, "DELETE must return the deleted object if successful"
 
-        assert not ServiceFunctionEndpoint.exists("sf_i1", "sf_endpoint1", "sr1"), "Resource must be deleted after the delete API method has been called."
+        assert not ServiceFunctionEndpoint.exists("sf_endpoint1"), "Resource must be deleted after the delete API method has been called."
 
         request = testing.DummyRequest()
         request.params["sf_endpoint"] = "sf_endpoint1"
-        request.params["sf_i"] = "sf_i1"
-        request.params["sr"] = "sr1"
         error_raised = False
         try:
             WhoamiAPI(request).delete()
@@ -321,12 +304,10 @@ class TestWhoamiAPI(object):
             getattr(WhoamiAPI(request), method).__call__()
         except HTTPBadRequest:
             error_raised = True
-        assert error_raised, "Error must be raised in case of insufficient number of arguments"
+        assert error_raised, "Error must be raised in case of wrong arguments"
 
         request = testing.DummyRequest()
         request.params["sf_endp"] = "sf_endpoint"  # argument should be sf_endpoint
-        request.params["sf_i"] = "sf_i"
-        request.params["sr"] = "sr"
         try:
             getattr(WhoamiAPI(request), method).__call__()
         except HTTPBadRequest:
diff --git a/src/service/clmcservice/whoamiapi/utilities.py b/src/service/clmcservice/whoamiapi/utilities.py
index fd141d6025c333eb4e32485d9004a43444a7844c..0ee493671cc0ad6f1a989821ce98df5d70bacaa6 100644
--- a/src/service/clmcservice/whoamiapi/utilities.py
+++ b/src/service/clmcservice/whoamiapi/utilities.py
@@ -31,37 +31,21 @@ def validate_sfendpoint_body(body):
     Validates the request body used to create an endpoint configuration resource in the database.
 
     :param body: the request body to validate
+
     :return the validated configuration dictionary object
+
     :raise AssertionError: if the body is not a valid configuration
     """
 
     try:
         body = loads(body)
-    except:
+    except Exception:
         raise AssertionError("Configuration must be a JSON object.")
 
-    # the database table has one more column which is a UID integer
-    assert len(body) == len(ServiceFunctionEndpoint.__table__.columns) - 1, "Endpoint configuration mustn't contain a different number of attributes than the number of required ones."
+    assert len(body) == len(ServiceFunctionEndpoint.__table__.columns), "Endpoint configuration must contain exactly the required number of attributes."
 
     # validate that all required attributes are given in the body
-    for attribute in ServiceFunctionEndpoint.required_columns():
-        assert attribute in body, "Required attribute not found in the request content."
+    for attribute in ServiceFunctionEndpoint.__table__.columns:
+        assert attribute.name in body, "Required attribute not found in the request content."
 
     return body
-
-
-def validate_sfendpoint_params(params):
-    """
-    Validates the request parameters to retrieve an endpoint configuration resource from the database.
-
-    :param params: the parameters dictionary to validate
-    :return: the validated parameters
-    :raise AssertionError: for invalid parameters
-    """
-
-    constrained_cols = ServiceFunctionEndpoint.constrained_columns()
-
-    assert len(params) == len(constrained_cols), "Incorrect url query parameters."
-
-    return params
-
diff --git a/src/service/clmcservice/whoamiapi/views.py b/src/service/clmcservice/whoamiapi/views.py
index 3e3a4e650467adb18fbb3b003d051a54dc89dc54..b89bfb5febc963521b43d8aa12ac379520e02ec4 100644
--- a/src/service/clmcservice/whoamiapi/views.py
+++ b/src/service/clmcservice/whoamiapi/views.py
@@ -25,7 +25,7 @@
 from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPNotFound
 from pyramid.view import view_defaults, view_config
 from clmcservice.models import ServiceFunctionEndpoint
-from clmcservice.whoamiapi.utilities import validate_sfendpoint_body, validate_sfendpoint_params
+from clmcservice.whoamiapi.utilities import validate_sfendpoint_body
 
 
 @view_defaults(renderer='json')
@@ -59,13 +59,14 @@ class WhoamiAPI(object):
         GET API call for a single resources.
 
         :return: One service function endpoint configuration instance retrieved from the database by querying the uniquely constrained columns.
+
         :raises HTTPBadRequest: if the request parameters are invalid(invalid url query string)
         :raises HTTPNotFound: if a resource with the given parameters doesn't exist in the database
         """
 
         sf_endpoint = self._get_sf_endpoint_from_url_string()
         if sf_endpoint is None:
-            raise HTTPNotFound("A service function endpoint with the given parameters doesn't exist.")
+            raise HTTPNotFound("A service function endpoint with the given URL query parameters doesn't exist.")
         else:
             return sf_endpoint.json
 
@@ -75,6 +76,7 @@ class WhoamiAPI(object):
         A POST API call to create a new service function endpoint.
 
         :return: A JSON response to the POST call - essentially with the data of the new resource
+
         :raises HTTPBadRequest: if request body is not a valid JSON for the configuration
         :raises HTTPConflict: if the unique constraints are not preserved after the creation of a new instance
         """
@@ -94,13 +96,14 @@ class WhoamiAPI(object):
         A PUT API call to update a service function endpoint.
 
         :return: A JSON response representing the updated object
+
         :raises HTTPBadRequest: if the request parameters are invalid(invalid url query string)
         :raises HTTPNotFound: if a resource with the given parameters doesn't exist in the database
         """
 
         sf_endpoint = self._get_sf_endpoint_from_url_string()
         if sf_endpoint is None:
-            raise HTTPNotFound("A service function endpoint with the given parameters doesn't exist.")
+            raise HTTPNotFound("A service function endpoint with the given identifier doesn't exist.")
         else:
             try:
                 body = self.request.body.decode(self.request.charset)
@@ -110,14 +113,14 @@ class WhoamiAPI(object):
 
             new_resource = validated_body
             old_resource = sf_endpoint.json
-            updating = new_resource["sf_i"] == old_resource["sf_i"] and new_resource["sf_endpoint"] == old_resource["sf_endpoint"] and new_resource["sr"] == old_resource["sr"]
+            updating = new_resource["sf_endpoint"] == old_resource["sf_endpoint"]
 
             if updating:
                 ServiceFunctionEndpoint.delete(sf_endpoint)
                 new_sf_endpoint = ServiceFunctionEndpoint(**validated_body)
                 ServiceFunctionEndpoint.add(new_sf_endpoint)
             else:
-                resource_exists = ServiceFunctionEndpoint.exists(new_resource["sf_i"], new_resource["sf_endpoint"], new_resource["sr"])
+                resource_exists = ServiceFunctionEndpoint.exists(new_resource["sf_endpoint"])
                 if resource_exists:
                     raise HTTPConflict("Service function endpoint with this configuration already exists.")  # error 409 in case of resource conflict
 
@@ -132,6 +135,7 @@ class WhoamiAPI(object):
         Deletes an instance of a service function endpoint configuration in the database.
 
         :return: A content of the object that has been deleted
+
         :raises HTTPBadRequest: if the request parameters are invalid(invalid url query string)
         :raises HTTPNotFound: if a resource with the given parameters doesn't exist in the database
         """
@@ -151,17 +155,12 @@ class WhoamiAPI(object):
         :return: An instance of a service function endpoint configuration or None if not existing
         """
 
-        params = {}
-        for attribute in ServiceFunctionEndpoint.constrained_columns():
-            if attribute in self.request.params:
-                params[attribute] = self.request.params.get(attribute)
+        sf_endpoint_id = self.request.params.get("sf_endpoint")
+        if sf_endpoint_id is None:
+            raise HTTPBadRequest("Request format is incorrect: sf_endpoint ID is not found in the URL query string.")
 
-        try:
-            params = validate_sfendpoint_params(params)
-        except AssertionError as e:
-            raise HTTPBadRequest("Request format is incorrect: {0}".format(e.args))
+        sf_endpoint = ServiceFunctionEndpoint.get(sf_endpoint_id)
 
-        sf_endpoint = ServiceFunctionEndpoint.get(**params)
         return sf_endpoint
 
     def _validate_and_create(self):
@@ -181,9 +180,9 @@ class WhoamiAPI(object):
 
         resource = validated_body
 
-        resource_exists = ServiceFunctionEndpoint.exists(resource["sf_i"], resource["sf_endpoint"], resource["sr"])
+        resource_exists = ServiceFunctionEndpoint.exists(resource["sf_endpoint"])
         if resource_exists:
-            raise HTTPConflict("Service function endpoint with this configuration already exists.")  # error 409 in case of resource conflict
+            raise HTTPConflict("Service function endpoint with the given identifier already exists.")  # error 409 in case of resource conflict
 
         # create an instance of the model
         sf_endpoint = ServiceFunctionEndpoint(**resource)
diff --git a/src/service/development.ini b/src/service/development.ini
index 3d14f33fa23e32ac0ec50d839efcdcf4d080704f..faf52a578c2e639d0a1cf872d0bb4792c8a80cd8 100644
--- a/src/service/development.ini
+++ b/src/service/development.ini
@@ -17,7 +17,7 @@ exclog.ignore =
 # Configuration file path
 configuration_file_path = /etc/flame/clmc/service.conf
 
-network_configuration_path = /vagrant/src/service/network_config.json
+network_configuration_path = /vagrant/src/service/resources/GraphAPI/network_config.json
 
 # PostgreSQL connection url
 sqlalchemy.url = postgresql://clmc:clmc_service@localhost:5432/whoamidb
@@ -26,6 +26,10 @@ sqlalchemy.url = postgresql://clmc:clmc_service@localhost:5432/whoamidb
 influx_host = localhost
 influx_port = 8086
 
+# Kapacitor connection
+kapacitor_host = localhost
+kapacitor_port = 9092
+
 # Neo4j connection
 neo4j_host = localhost
 neo4j_password = admin
diff --git a/src/service/production.ini b/src/service/production.ini
index 33c7ca6cb032aceffb2ac12e21af9416ab1624d2..1716af09d14e2121556b2133aec17a640516805e 100644
--- a/src/service/production.ini
+++ b/src/service/production.ini
@@ -17,7 +17,7 @@ exclog.ignore =
 # Configuration file path
 configuration_file_path = /etc/flame/clmc/service.conf
 
-network_configuration_path = /vagrant/src/service/network_config.json
+network_configuration_path = /vagrant/src/service/resources/GraphAPI/network_config.json
 
 # PostgreSQL connection url
 sqlalchemy.url = postgresql://clmc:clmc_service@localhost:5432/whoamidb
@@ -26,6 +26,10 @@ sqlalchemy.url = postgresql://clmc:clmc_service@localhost:5432/whoamidb
 influx_host = localhost
 influx_port = 8086
 
+# Kapacitor connection
+kapacitor_host = localhost
+kapacitor_port = 9092
+
 # Neo4j connection
 neo4j_host = localhost
 neo4j_password = admin
diff --git a/src/service/network_config.json b/src/service/resources/GraphAPI/network_config.json
similarity index 100%
rename from src/service/network_config.json
rename to src/service/resources/GraphAPI/network_config.json
diff --git a/src/service/resources/TICKscript/deadman-template.tick b/src/service/resources/TICKscript/deadman-template.tick
new file mode 100644
index 0000000000000000000000000000000000000000..2392b7716a3ee60acbaaaef4e6319e13b35f1bf2
--- /dev/null
+++ b/src/service/resources/TICKscript/deadman-template.tick
@@ -0,0 +1,29 @@
+var db string  // database per service function chain, so db is named after sfc
+
+var rp = 'autogen'  // default value for the retention policy
+
+var measurement string
+
+var whereClause = lambda: TRUE  // default value is a function which returns TRUE, hence no filtering of the query result
+
+var messageValue = 'TRUE'  // default value is TRUE, as this is what SFEMC expects as a notification for an event rule
+
+var alertPeriod duration
+
+var throughputThreshold float  // alerts will trigger if data points reported during the alert period fall below this value
+
+var topicID string
+
+
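+// deadman pipeline: stream points from the measurement (filtered by whereClause) and raise an alert
+// on topicID whenever the reported point rate drops below throughputThreshold within alertPeriod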
+stream
+    | from()
+        .database(db)
+        .retentionPolicy(rp)
+        .measurement(measurement)
+        .where(whereClause)
+    | deadman(throughputThreshold, alertPeriod)
+        .id(topicID)
+        .details('db=' + db + ',measurement=' + measurement)
+        .message(messageValue)
+        .topic(topicID)
+        .noRecoveries()
\ No newline at end of file
diff --git a/src/service/resources/TICKscript/relative-template.tick b/src/service/resources/TICKscript/relative-template.tick
new file mode 100644
index 0000000000000000000000000000000000000000..2363ef4e2ddd284040e825af1433c2ee658e6ee1
--- /dev/null
+++ b/src/service/resources/TICKscript/relative-template.tick
@@ -0,0 +1,47 @@
+var db string  // database per service function chain, so db is named after sfc
+
+var rp = 'autogen'  // default value for the retention policy
+
+var measurement string
+
+var field string
+
+var influxFunction string
+
+var whereClause = 'TRUE'  // default value is TRUE, hence no filtering of the query result
+
+var messageValue = 'TRUE'  // default value is TRUE, as this is what SFEMC expects as a notification for an event rule
+
+var comparisonLambda lambda  // comparison function e.g. "diff" > 40
+
+var alertPeriod duration
+
+var topicID string
+
+
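+// relative pipeline: two identical aggregation queries cover the current and the previous alert period;
+// the past batch is shifted forward in time so it can be joined with the current one, and the alert
+// fires when comparisonLambda holds for the computed difference ("diff") between the two values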
+var current = batch
+    |query('SELECT ' + influxFunction + '(' + field + ') AS value FROM "' + db + '"."' + rp + '"."' + measurement + '" WHERE ' + whereClause)
+        .period(alertPeriod)
+        .every(alertPeriod)
+        .align()
+
+var past = batch
+    |query('SELECT ' + influxFunction + '(' + field + ') AS value FROM "' + db + '"."' + rp + '"."' + measurement + '" WHERE ' + whereClause)
+        .period(alertPeriod)
+        .every(alertPeriod)
+        .offset(alertPeriod)
+        .align()
+    | shift(alertPeriod)
+
+past
+    | join(current)
+        .as('past', 'current')
+    | eval(lambda: float("current.value" - "past.value"))
+        .as('diff')
+    | alert()
+        .id(topicID)
+        .details('db=' + db + ',measurement=' + measurement)
+        .crit(comparisonLambda)
+        .message(messageValue)
+        .topic(topicID)
+        .noRecoveries()
\ No newline at end of file
diff --git a/src/service/resources/TICKscript/threshold-template.tick b/src/service/resources/TICKscript/threshold-template.tick
new file mode 100644
index 0000000000000000000000000000000000000000..5518814f5a4c652fdf9c6b70496de486261a5678
--- /dev/null
+++ b/src/service/resources/TICKscript/threshold-template.tick
@@ -0,0 +1,32 @@
+var db string  // database per service function chain, so db is named after sfc
+
+var rp = 'autogen'  // default value for the retention policy
+
+var measurement string
+
+var field string
+
+var influxFunction string
+
+var whereClause = 'TRUE'  // default value is TRUE, hence no filtering of the query result
+
+var messageValue = 'TRUE'  // default value is TRUE, as this is what SFEMC expects as a notification for an event rule
+
+var comparisonLambda lambda  // comparison function e.g. "real_value" > 40
+
+var alertPeriod duration
+
+var topicID string
+
+
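+// threshold pipeline: aggregate the field over every alert period and raise a critical alert on topicID
+// whenever comparisonLambda evaluates to TRUE for the aggregated "real_value"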
+batch
+    |query('SELECT ' + influxFunction + '(' + field + ') AS real_value FROM "' + db + '"."' + rp + '"."' + measurement + '" WHERE ' + whereClause)
+        .period(alertPeriod)
+        .every(alertPeriod)
+    |alert()
+        .id(topicID)
+        .details('db=' + db + ',measurement=' + measurement)
+        .crit(comparisonLambda)
+        .message(messageValue)
+        .topic(topicID)
+        .noRecoveries()
\ No newline at end of file
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-1.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-1.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..eb47f97eb1b17ed8b91831faf613f49035e05675
--- /dev/null
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-1.yaml
@@ -0,0 +1,73 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+description: TOSCA Alerts Configuration document
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+metadata:
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+topology_template:
+
+  policies:
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          high_latency:
+            description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
+            event_type: threshold
+            metric: network.latency
+            condition:
+              threshold: 45
+              granularity: 120
+              aggregation_method: mean
+              resource_type:
+                flame_location: watershed
+              comparison_operator: gt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/high-latency
+    - requests_diff_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          increase_in_requests:
+            description: |
+              This event triggers when the number of requests has increased relative to the number of requests received
+              120 seconds ago.
+            event_type: relative
+            metric: storage.requests
+            condition:
+              threshold: 100
+              granularity: 120
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: gte
+            action:
+              implementation:
+              - http://sfemc.flame.eu/notify
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          low_requests:
+            description: |
+              This event triggers when the last reported number of requests for a given service function
+              falls behind a given threshold.
+            event_type: threshold
+            metric: storage.requests
+            condition:
+              threshold: 5
+              granularity: 60
+              aggregation_method: last
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: lt
+            action:
+              implementation:
+                # Fails since policy has no HTTP alert handlers in the implementation.
\ No newline at end of file
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-10.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-10.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..67793a750eee7654c944c7504acb7e1c1d33a9f5
--- /dev/null
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-10.yaml
@@ -0,0 +1,74 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+description: TOSCA Alerts Configuration document
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+metadata:
+  sfc_ID: companyA-VR # correct format is sfc, not sfc_ID
+  sfci_ID: companyA-VR-premium # correct format is sfci, not sfci_ID
+
+topology_template:
+
+  policies:
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          high_latency:
+            description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
+            event_type: threshold
+            metric: network.latency
+            condition:
+              threshold: 45
+              granularity: 120
+              aggregation_method: mean
+              resource_type:
+                flame_location: watershed
+              comparison_operator: gt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/high-latency
+    - requests_diff_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          decrease_in_requests:
+            description: |
+              This event triggers when the number of requests has decreased relative to the number of requests received
+              120 seconds ago.
+            event_type: relative
+            metric: storage.requests
+            condition:
+              threshold: -100  # requests have decreased by at least 100
+              granularity: 120
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: lte
+            action:
+              implementation:
+              - http://sfemc.flame.eu/notify
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          low_requests:
+            description: |
+              This event triggers when the last reported number of requests for a given service function
+              falls behind a given threshold.
+            event_type: threshold
+            metric: storage.requests
+            condition:
+              threshold: 5
+              granularity: 60
+              aggregation_method: last
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: lt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/low-requests
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-2.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8a06dcf2c47a9c75d6d4dd5516ec9eba11e278b9
--- /dev/null
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-2.yaml
@@ -0,0 +1,50 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+description: TOSCA Alerts Configuration document
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+metadata:
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+topology_template:
+
+  policies:
+    - missing_measurement_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          missing_storage_measurements:
+            description: This event triggers when the number of storage measurements reported falls below the threshold value.
+            event_type: deadman
+            metric: storage.*
+            condition:
+              threshold: 0
+              granularity: 60
+              resource_type:
+                flame_sfp: storage
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          low_requests:
+            description: |
+              This event triggers when the last reported number of requests for a given service function
+              falls behind a given threshold.
+            event_type: threshold
+            metric: storage.requests
+            condition:
+              threshold: 5
+              granularity: 60
+              aggregation_method: last
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: less than # invalid comparison operator
+            action:
+              implementation:
+                - http://companyA.alert-handler.flame.eu/high-latency
\ No newline at end of file
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-3.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-3.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4ed49c37360320e59bd4c881b627217a83ffdd85
--- /dev/null
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-3.yaml
@@ -0,0 +1,35 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+description: TOSCA Alerts Configuration document
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+metadata:
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+topology_template:
+
+  policies:
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          low_requests:
+            description: |
+              This event triggers when the last reported number of requests for a given service function
+              falls behind a given threshold.
+            event_type: threshold
+            metric: storage.requests
+            condition:
+              threshold: 5
+              granularity: 60
+              aggregation_method: last
+              resource_type:
+                flame_sf_package_id: storage # sf_package_id is not the correct tag name, it is sfp
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: lt
+            action:
+              implementation:
+                - http://companyA.alert-handler.flame.eu/high-latency
\ No newline at end of file
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-4.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-4.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..67c403af9c9edfc01e0b11b38bf1a7837f969baf
--- /dev/null
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-4.yaml
@@ -0,0 +1,35 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+description: TOSCA Alerts Configuration document
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+metadata:
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+topology_template:
+
+  policies:
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          low_requests:
+            description: |
+              This event triggers when the last reported number of requests for a given service function
+              falls behind a given threshold.
+            event_type: threshold
+            metric: storage.requests
+            condition:
+              threshold: 5
+              granularity: 60
+              aggregation_method: average # wrong aggregation method - should be mean, not average
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: lt
+            action:
+              implementation:
+                - http://companyA.alert-handler.flame.eu/high-latency
\ No newline at end of file
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-5.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-5.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..735761389c4e4234c451efca3669303ce0108bed
--- /dev/null
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-5.yaml
@@ -0,0 +1,36 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+description: TOSCA Alerts Configuration document
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+metadata:
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+topology_template:
+
+  policies:
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          low_requests:
+            description: |
+              This event triggers when the last reported number of requests for a given service function
+              falls behind a given threshold.
+            event_type: threshold
+            metric: storage.requests
+            condition:
+              threshold: 5
+              granularity: 60
+              aggregation_method: mean
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: lt
+            action:
+              implementation:
+                - http://companyA.alert-handler.flame.eu/high-latency
+                - sfemc-webhook  # should be a valid URL address
\ No newline at end of file
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-6.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-6.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ac13f6fe6eca8ce4b9b80233eba7c65572952360
--- /dev/null
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-6.yaml
@@ -0,0 +1,54 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+description: TOSCA Alerts Configuration document
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+metadata:
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+topology_template:
+
+  policies:
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          high_latency:
+            description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
+            event_type: threshold
+            metric: network-latency  # should be in format measurement.field - e.g. network.latency
+            condition:
+              threshold: 45
+              granularity: 120
+              aggregation_method: mean
+              resource_type:
+                flame_location: watershed
+                flame_server: watershed
+              comparison_operator: gt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/high-latency
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          low_requests:
+            description: |
+              This event triggers when the last reported number of requests for a given service function
+              falls behind a given threshold.
+            event_type: threshold
+            metric: storage.requests
+            condition:
+              threshold: 5
+              granularity: 60
+              aggregation_method: last
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: lt
+            action:
+              implementation:
+                - http://companyA.alert-handler.flame.eu/high-latency
\ No newline at end of file
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-7.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-7.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..acb00ed3ac6ff2380c90d12424d6b386b3a45498
--- /dev/null
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-7.yaml
@@ -0,0 +1,69 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+description: TOSCA Alerts Configuration document
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+metadata:
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+topology_template:
+
+  policies:
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          high_latency:
+            description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
+            event_type: threshold-template # should be one of ("threshold", "relative", "deadman")
+            metric: network.latency
+            condition:
+              threshold: 45
+              granularity: 120
+              aggregation_method: mean
+              resource_type:
+                flame_location: watershed
+              comparison_operator: gt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/high-latency
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          low_requests:
+            description: |
+              This event triggers when the last reported number of requests for a given service function
+              falls behind a given threshold.
+            event_type: threshold
+            metric: storage.requests
+            condition:
+              threshold: 5
+              granularity: 60
+              aggregation_method: last
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: lt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/low-requests
+    - missing_measurement_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          missing_storage_measurements:
+            description: This event triggers when the number of storage measurements reported falls below the threshold value.
+            event_type: deadman
+            metric: storage.*
+            condition:
+              threshold: 0
+              granularity: 60
+              resource_type:
+                flame_sfp: storage
+            action:
+              implementation:
+              - http://sfemc.flame.eu/notify
\ No newline at end of file
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-8.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-8.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a2c3009707631525caa656294f0cf2461ba2a0b9
--- /dev/null
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-8.yaml
@@ -0,0 +1,77 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+description: TOSCA Alerts Configuration document
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+metadata:
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+topology_template:
+
+  policies:
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          # should specify at least 1 trigger
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          low_requests:
+            description: |
+              This event triggers when the last reported number of requests for a given service function
+              falls behind a given threshold.
+            event_type: threshold
+            metric: storage.requests
+            condition:
+              threshold: 5
+              granularity: 60
+              aggregation_method: last
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: lt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/low-requests
+    - requests_diff_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          increase_in_requests:
+            description: |
+              This event triggers when the number of requests has increased relative to the number of requests received
+              120 seconds ago.
+            event_type: relative
+            metric: storage.requests
+            condition:
+              threshold: 100  # requests have increased by at least 100
+              granularity: 120
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: gte
+            action:
+              implementation:
+              - http://sfemc.flame.eu/notify
+          decrease_in_requests:
+            description: |
+              This event triggers when the number of requests has decreased relative to the number of requests received
+              120 seconds ago.
+            event_type: relative
+            metric: storage.requests
+            condition:
+              threshold: -100  # requests have decreased by at least 100
+              granularity: 120
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: lte
+            action:
+              implementation:
+              - http://sfemc.flame.eu/notify
\ No newline at end of file
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-9.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-9.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..102c476d55265c73c22b46d889fa2a44d0e85930
--- /dev/null
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-9.yaml
@@ -0,0 +1,52 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+description: TOSCA Alerts Configuration document
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+# missing metadata section
+
+topology_template:
+
+  policies:
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          high_latency:
+            description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
+            event_type: threshold
+            metric: network.latency
+            condition:
+              threshold: 45
+              granularity: 120
+              aggregation_method: mean
+              resource_type:
+                flame_location: watershed
+              comparison_operator: gt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/high-latency
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          low_requests:
+            description: |
+              This event triggers when the last reported number of requests for a given service function
+              falls behind a given threshold.
+            event_type: threshold
+            metric: storage.requests
+            condition:
+              threshold: 5
+              granularity: 60
+              aggregation_method: last
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: lt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/low-requests
diff --git a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-1.yaml b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-1.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..eba786b9bed5f58890851a95b76f8bba7fc19d7e
--- /dev/null
+++ b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-1.yaml
@@ -0,0 +1,55 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+description: TOSCA Alerts Configuration document
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+metadata:
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+topology_template:
+
+  policies:
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          high_latency:
+            description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
+            event_type: threshold
+            metric: network.latency
+            condition:
+              threshold: 45
+              granularity: 120
+              aggregation_method: mean
+              resource_type:
+                flame_location: watershed
+              comparison_operator: gt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/high-latency
+
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          low_requests:
+            description: |
+              This event triggers when the last reported number of requests for a given service function
+              falls behind a given threshold.
+            event_type: threshold
+            metric: storage.requests
+            condition:
+              threshold: 5
+              granularity: 60
+              aggregation_method: last
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: lt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/low-requests
\ No newline at end of file
diff --git a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-2.yaml b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5169f5d4d205f3f9121c5c390a2596235e05de49
--- /dev/null
+++ b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-2.yaml
@@ -0,0 +1,54 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+# no description, valid since it is optional
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+metadata:
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+topology_template:
+
+  policies:
+    - requests_diff_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          decrease_in_requests:
+            description: |
+              This event triggers when the number of requests has decreased relative to the number of requests received
+              120 seconds ago.
+            event_type: relative
+            metric: storage.requests
+            condition:
+              threshold: -100  # requests have decreased by at least 100
+              granularity: 120
+              resource_type:
+                flame_sfc: companyA-VR  # sfc tag is also allowed, even though it is already included in the metadata
+                flame_sfci: companyA-VR-premium # sfci tag is also allowed, even though it is already included in the metadata
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: lte
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+
+    - missing_measurement_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          missing_storage_measurements:
+            description: This event triggers when the number of storage measurements reported falls below the threshold value.
+            event_type: deadman
+            # deadman trigger instances monitor the whole measurement (storage in this case), so simply put a star for field value
+            # to be compliant with the <measurement>.<field> format
+            metric: storage.*
+            condition:
+              threshold: 0  # if requests are less than or equal to 0 (in other words, no measurements are reported)
+              granularity: 60  # check for missing data over the last 60 seconds
+              resource_type:
+                flame_sfp: storage
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
\ No newline at end of file
diff --git a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-3.yaml b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-3.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ef1c542aa86bbcd08809b850c1bdfb28de2b693b
--- /dev/null
+++ b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-3.yaml
@@ -0,0 +1,52 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+# no description, valid since it is optional
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+metadata:
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+topology_template:
+
+  policies:
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          high_latency:
+            # optional description - hence, valid
+            event_type: threshold
+            metric: network.latency
+            condition:
+              threshold: 45
+              granularity: 120
+              # aggregation_method is optional, default value is "mean"
+              resource_type:
+                flame_location: watershed
+              # comparison operator is optional, default value is >= or "gte"
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/high-latency
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          low_requests:
+            # optional description - hence, valid
+            event_type: threshold
+            metric: storage.requests
+            condition:
+              threshold: 5
+              granularity: 60
+              aggregation_method: first
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: lt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/low-requests
diff --git a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-4.yaml b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-4.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..87e31406e07c0b33767108e9c9de2091a160ef79
--- /dev/null
+++ b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-4.yaml
@@ -0,0 +1,68 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+# no description, valid since it is optional
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+metadata:
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+topology_template:
+
+  policies:
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          high_latency:
+            # optional description - hence, valid
+            event_type: threshold
+            metric: network.latency
+            condition:
+              threshold: 45
+              granularity: 120
+              aggregation_method: median
+              resource_type:
+                flame_location: watershed
+                flame_server: watershed
+              comparison_operator: gt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/high-latency
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          low_requests:
+            # optional description - hence, valid
+            event_type: threshold
+            metric: storage.requests
+            condition:
+              threshold: 5
+              granularity: 60
+              aggregation_method: first
+              # resource type missing - optional, so it is valid
+              comparison_operator: lt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/low-requests
+    - missing_measurement_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          missing_storage_measurements:
+            description: This event triggers when the number of storage measurements reported falls below the threshold value.
+            event_type: deadman
+            # deadman trigger instances monitor the whole measurement (storage in this case), so simply put a star for field value
+            # to be compliant with the <measurement>.<field> format
+            metric: storage.*
+            condition:
+              threshold: 0  # if requests are less than or equal to 0 (in other words, no measurements are reported)
+              granularity: 60  # check for missing data over the last 60 seconds
+              resource_type:
+                flame_sfp: storage
+              comparison_operator: gte # although events of type deadman do not use a comparison operator, the validator will not complain if one is given; it will simply be ignored
+            action:
+              implementation:
+              - http://sfemc.flame.eu/notify
diff --git a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-5.yaml b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-5.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f03c52382b2eccbc268617a975cf018bdc8d7c18
--- /dev/null
+++ b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-5.yaml
@@ -0,0 +1,70 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+# no description, valid since it is optional
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+metadata:
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+topology_template:
+
+  policies:
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          high_latency:
+            # optional description - hence, valid
+            event_type: relative  # relative template is allowed
+            metric: network.latency
+            condition:
+              threshold: 45
+              granularity: 120
+              aggregation_method: median
+              resource_type:
+                flame_location: watershed
+              comparison_operator: gt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/high-latency
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          low_requests:
+            # optional description - hence, valid
+            event_type: deadman # deadman template is allowed
+            metric: storage.requests
+            condition:
+              threshold: 5
+              granularity: 60
+              aggregation_method: first
+              # resource type missing - optional, so it is valid
+              comparison_operator: lt
+            action:
+              implementation:
+                - https://sfemc.flame.eu/notify
+                - http://localhost:9999/low-requests  # localhost url is also allowed
+    - requests_diff_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          increase_in_requests:
+            description: |
+              This event triggers when the number of requests has increased relative to the number of requests received
+              120 seconds ago.
+            event_type: relative
+            metric: storage.requests
+            condition:
+              threshold: 100  # requests have increased by at least 100
+              granularity: 120
+              aggregation_method: first  # Although events of type relative do not require an aggregation method, the validator will not complain if one is given; it will simply be ignored
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: gte
+            action:
+              implementation:
+              - http://sfemc.flame.eu/notify
diff --git a/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-1.yaml b/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-1.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..abb5ed1af95604c29c2443622ffa5a09a00b1848
--- /dev/null
+++ b/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-1.yaml
@@ -0,0 +1,159 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+metadata:
+  template_name: Flame Test Tosca resource specification
+  sfc: companyA-VR-ERROR
+  sfci: companyA-VR-premium
+
+# Import own definitions of nodes, capabilities and policy syntax.
+imports:
+  - flame_definitions-0.1.7.yaml
+
+# Starting the template
+
+## Topology
+topology_template:
+  node_templates:
+    database:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            mem_size: 4096 MB
+            disk_size: 10 GB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - db.app.ict-flame.eu
+
+
+    frontend:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            disk_size: 10 GB
+            mem_size: 4096 MB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - frontend.app.ict-flame.eu
+          - www.app.ict-flame.eu
+
+  ## Policies
+  policies:
+    - init:
+        type: eu.ict-flame.policies.InitialPolicy
+        description: Start the nodes initially
+        properties:
+          parent: service_paid
+        triggers:
+          inital_trigger:
+            condition:
+              constraint: initialise
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.connected
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::high_latency
+              period: 600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::low_requests
+              period: 600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - cooldown_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Based on a time constraint, shut down Bristol and Manchester
+        triggers:
+          time_trigger:
+            description: Disconnect Nodes
+            condition:
+              constraint: unknown::time_frame
+              period: 60 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.booted
+        properties:
+          parent: high_latency_check
+          scheduling:
+            dt_start:
+              TZID: "W. Europe Standard Time"
+              time: 20180324T000000
+            dt_end:
+              tzid: "W. Europe Standard Time"
+              time: 20180324T050000
+            rrule:
+              freq: weekly
+              rule:
+                byday: [mo, tu, we, th, su]
+
+    - service_paid:
+        type: eu.ict-flame.policies.StateChange
+        description: Check outstanding payments. If there are outstanding payments, shut down the deployed service.
+        triggers:
+          not_paid_trigger:
+            description: Check if the payment is late
+            condition:
+              constraint: unknown::serviceIsNotPaid
+              period: 3600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.shutdown
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_action:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
diff --git a/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-2.yaml b/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..93b04a935175eace39fb59358aa90be12a2b1e92
--- /dev/null
+++ b/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-2.yaml
@@ -0,0 +1,119 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+metadata:
+  template_name: Flame Test Tosca resource specification
+  sfc: companyA-VR
+  sfci: companyA-VR-premium-ERROR
+
+# Import own definitions of nodes, capabilities and policy syntax.
+imports:
+  - flame_definitions-0.1.7.yaml
+
+# Starting the template
+
+## Topology
+topology_template:
+  node_templates:
+    database:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            mem_size: 4096 MB
+            disk_size: 10 GB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - db.app.ict-flame.eu
+
+
+    frontend:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            disk_size: 10 GB
+            mem_size: 4096 MB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - frontend.app.ict-flame.eu
+          - www.app.ict-flame.eu
+
+  ## Policies
+  policies:
+    - init:
+        type: eu.ict-flame.policies.InitialPolicy
+        description: Start the nodes initially
+        properties:
+          parent: service_paid
+        triggers:
+          inital_trigger:
+            condition:
+              constraint: initialise
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.connected
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - requests_diff_policy:
+        type: eu.ict-flame.policies.StateChange
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::decrease_in_requests
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - missing_measurement_policy:
+        type: eu.ict-flame.policies.StateChange
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::missing_storage_measurements
+            action:
+              frontend:
+              - fqdn: frontend.app.ict-flame.eu
+                lifecycle_actions:
+                  Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - another_check:
+        type: eu.ict-flame.policies.StateChange
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: unknown::missing_storage_measurements
+            action:
+              frontend:
+              - fqdn: frontend.app.ict-flame.eu
+                lifecycle_actions:
+                  Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+
+
diff --git a/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-3.yaml b/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-3.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cd5fea81620ebe515ab2bd0bb2ff70ca0ef4be51
--- /dev/null
+++ b/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-3.yaml
@@ -0,0 +1,157 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+metadata:
+  template_name: Flame Test Tosca resource specification
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+# Import own definitions of nodes, capabilities and policy syntax.
+imports:
+  - flame_definitions-0.1.7.yaml
+
+# Starting the template
+
+## Topology
+topology_template:
+  node_templates:
+    database:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            mem_size: 4096 MB
+            disk_size: 10 GB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - db.app.ict-flame.eu
+
+
+    frontend:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            disk_size: 10 GB
+            mem_size: 4096 MB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - frontend.app.ict-flame.eu
+          - www.app.ict-flame.eu
+
+  ## Policies
+  policies:
+    - init:
+        type: eu.ict-flame.policies.InitialPolicy
+        description: Start the nodes initially
+        properties:
+          parent: service_paid
+        triggers:
+          inital_trigger:
+            condition:
+              constraint: initialise
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.connected
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - high_latency_check: # doesn't match alert spec policy name
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::high_latency
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::low_requests_event  # doesn't match alert spec event ID
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - cooldown_policy:
+        type: eu.ict-flame.policies.Update
+        description: Based on a time constraint, shut down Bristol and Manchester
+        triggers:
+          time_trigger:
+            description: Disconnect Nodes
+            condition:
+              constraint: unknown::time_frame
+              period: 60 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.booted
+        properties:
+          parent: high_latency_check
+          scheduling:
+            dt_start:
+              TZID: "W. Europe Standard Time"
+              time: 20180324T000000
+            dt_end:
+              tzid: "W. Europe Standard Time"
+              time: 20180324T050000
+            rrule:
+              freq: weekly
+              rule:
+                byday: [mo, tu, we, th, su]
+
+    - service_paid:
+        type: eu.ict-flame.policies.StateChange
+        description: Check outstanding payments. If there are outstanding payments, we shut down the deployed service.
+        triggers:
+          not_paid_trigger:
+            description: Check if the payment is late
+            condition:
+              constraint: unknown::serviceIsNotPaid
+              period: 3600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.shutdown
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_action:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
diff --git a/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-4.yaml b/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-4.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..deabe9040b5619d9e0b95c4a70fbbcefb92b46e2
--- /dev/null
+++ b/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-4.yaml
@@ -0,0 +1,174 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+metadata:
+  template_name: Flame Test Tosca resource specification
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+# Import own definitions of nodes, capabilities and policy syntax.
+imports:
+  - flame_definitions-0.1.7.yaml
+
+# Starting the template
+
+## Topology
+topology_template:
+  node_templates:
+    database:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            mem_size: 4096 MB
+            disk_size: 10 GB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - db.app.ict-flame.eu
+
+
+    frontend:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            disk_size: 10 GB
+            mem_size: 4096 MB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - frontend.app.ict-flame.eu
+          - www.app.ict-flame.eu
+
+  ## Policies
+  policies:
+    - init:
+        type: eu.ict-flame.policies.InitialPolicy
+        description: Start the nodes initially
+        properties:
+          parent: service_paid
+        triggers:
+          inital_trigger:
+            condition:
+              constraint: initialise
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.connected
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::high_latency
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::low_requests
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - missing_measurement_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::missing_storage_measurements_ID # inconsistent with the respective alert schema
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - cooldown_policy:
+        type: eu.ict-flame.policies.Update
+        description: Based on a time constraint, shut down Bristol and Manchester
+        triggers:
+          time_trigger:
+            description: Disconnect Nodes
+            condition:
+              constraint: unknown::time_frame
+              period: 60 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.booted
+        properties:
+          parent: high_latency_check
+          scheduling:
+            dt_start:
+              TZID: "W. Europe Standard Time"
+              time: 20180324T000000
+            dt_end:
+              tzid: "W. Europe Standard Time"
+              time: 20180324T050000
+            rrule:
+              freq: weekly
+              rule:
+                byday: [mo, tu, we, th, su]
+
+    - service_paid:
+        type: eu.ict-flame.policies.StateChange
+        description: Check outstanding payments. If there are outstanding payments, we shut down the deployed service.
+        triggers:
+          not_paid_trigger:
+            description: Check if the payment is late
+            condition:
+              constraint: unknown::serviceIsNotPaid
+              period: 3600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.shutdown
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_action:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
diff --git a/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-5.yaml b/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-5.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..20c1e78bf6027213a6caf1823a97aa307f79746d
--- /dev/null
+++ b/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-5.yaml
@@ -0,0 +1,174 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+metadata:
+  template_name: Flame Test Tosca resource specification
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+# Import own definitions of nodes, capabilities and policy syntax.
+imports:
+  - flame_definitions-0.1.7.yaml
+
+# Starting the template
+
+## Topology
+topology_template:
+  node_templates:
+    database:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            mem_size: 4096 MB
+            disk_size: 10 GB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - db.app.ict-flame.eu
+
+
+    frontend:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            disk_size: 10 GB
+            mem_size: 4096 MB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - frontend.app.ict-flame.eu
+          - www.app.ict-flame.eu
+
+  ## Policies
+  policies:
+    - init:
+        type: eu.ict-flame.policies.InitialPolicy
+        description: Start the nodes initially
+        properties:
+          parent: service_paid
+        triggers:
+          inital_trigger:
+            condition:
+              constraint: initialise
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.connected
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::high_latency
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - low_requests_policy_ID:  # inconsistency with the respective alert specification
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::low_requests
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - requests_diff_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::increase_in_requests
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - cooldown_policy:
+        type: eu.ict-flame.policies.Update
+        description: Based on a time constraint, shut down Bristol and Manchester
+        triggers:
+          time_trigger:
+            description: Disconnect Nodes
+            condition:
+              constraint: unknown::time_frame
+              period: 60 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.booted
+        properties:
+          parent: high_latency_check
+          scheduling:
+            dt_start:
+              TZID: "W. Europe Standard Time"
+              time: 20180324T000000
+            dt_end:
+              tzid: "W. Europe Standard Time"
+              time: 20180324T050000
+            rrule:
+              freq: weekly
+              rule:
+                byday: [mo, tu, we, th, su]
+
+    - service_paid:
+        type: eu.ict-flame.policies.StateChange
+        description: Check outstanding payments. If there are outstanding payments, we shut down the deployed service.
+        triggers:
+          not_paid_trigger:
+            description: Check if the payment is late
+            condition:
+              constraint: unknown::serviceIsNotPaid
+              period: 3600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.shutdown
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_action:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
diff --git a/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-1.yaml b/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-1.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ceb65829204591b0d9eec14429014f3a325778c6
--- /dev/null
+++ b/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-1.yaml
@@ -0,0 +1,159 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+metadata:
+  template_name: Flame Test Tosca resource specification
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+# Import own definitions of nodes, capabilities and policy syntax.
+imports:
+  - flame_definitions-0.1.7.yaml
+
+# Starting the template
+
+## Topology
+topology_template:
+  node_templates:
+    database:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            mem_size: 4096 MB
+            disk_size: 10 GB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - db.app.ict-flame.eu
+
+
+    frontend:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            disk_size: 10 GB
+            mem_size: 4096 MB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - frontend.app.ict-flame.eu
+          - www.app.ict-flame.eu
+
+  ## Policies
+  policies:
+    - init:
+        type: eu.ict-flame.policies.InitialPolicy
+        description: Start the nodes initially
+        properties:
+          parent: service_paid
+        triggers:
+          inital_trigger:
+            condition:
+              constraint: initialise
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.connected
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::high_latency
+              period: 600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::low_requests
+              period: 600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - cooldown_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Based on a time constraint, shut down Bristol and Manchester
+        triggers:
+          time_trigger:
+            description: Disconnect Nodes
+            condition:
+              constraint: unknown::time_frame
+              period: 60 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.booted
+        properties:
+          parent: high_latency_check
+          scheduling:
+            dt_start:
+              TZID: "W. Europe Standard Time"
+              time: 20180324T000000
+            dt_end:
+              tzid: "W. Europe Standard Time"
+              time: 20180324T050000
+            rrule:
+              freq: weekly
+              rule:
+                byday: [mo, tu, we, th, su]
+
+    - service_paid:
+        type: eu.ict-flame.policies.StateChange
+        description: Check outstanding payments. If there are outstanding payments, we shut down the deployed service.
+        triggers:
+          not_paid_trigger:
+            description: Check if the payment is late
+            condition:
+              constraint: unknown::serviceIsNotPaid
+              period: 3600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.shutdown
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_action:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
diff --git a/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-2.yaml b/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cda9496ad069c82eeebe6b187f24c887a5d77446
--- /dev/null
+++ b/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-2.yaml
@@ -0,0 +1,119 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+metadata:
+  template_name: Flame Test Tosca resource specification
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+# Import own definitions of nodes, capabilities and policy syntax.
+imports:
+  - flame_definitions-0.1.7.yaml
+
+# Starting the template
+
+## Topology
+topology_template:
+  node_templates:
+    database:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            mem_size: 4096 MB
+            disk_size: 10 GB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - db.app.ict-flame.eu
+
+
+    frontend:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            disk_size: 10 GB
+            mem_size: 4096 MB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - frontend.app.ict-flame.eu
+          - www.app.ict-flame.eu
+
+  ## Policies
+  policies:
+    - init:
+        type: eu.ict-flame.policies.InitialPolicy
+        description: Start the nodes initially
+        properties:
+          parent: service_paid
+        triggers:
+          inital_trigger:
+            condition:
+              constraint: initialise
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.connected
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - requests_diff_policy:
+        type: eu.ict-flame.policies.StateChange
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::decrease_in_requests
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - missing_measurement_policy:
+        type: eu.ict-flame.policies.StateChange
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::missing_storage_measurements
+            action:
+              frontend:
+              - fqdn: frontend.app.ict-flame.eu
+                lifecycle_actions:
+                  Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - another_check:
+        type: eu.ict-flame.policies.StateChange
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: unknown::missing_storage_measurements
+            action:
+              frontend:
+              - fqdn: frontend.app.ict-flame.eu
+                lifecycle_actions:
+                  Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+
+
diff --git a/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-3.yaml b/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-3.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6190b25551dfdc78cba6dcd8ab4353f3c8791a65
--- /dev/null
+++ b/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-3.yaml
@@ -0,0 +1,157 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+metadata:
+  template_name: Flame Test Tosca resource specification
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+# Import own definitions of nodes, capabilities and policy syntax.
+imports:
+  - flame_definitions-0.1.7.yaml
+
+# Starting the template
+
+## Topology
+topology_template:
+  node_templates:
+    database:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            mem_size: 4096 MB
+            disk_size: 10 GB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - db.app.ict-flame.eu
+
+
+    frontend:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            disk_size: 10 GB
+            mem_size: 4096 MB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - frontend.app.ict-flame.eu
+          - www.app.ict-flame.eu
+
+  ## Policies
+  policies:
+    - init:
+        type: eu.ict-flame.policies.InitialPolicy
+        description: Start the nodes initially
+        properties:
+          parent: service_paid
+        triggers:
+          inital_trigger:
+            condition:
+              constraint: initialise
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.connected
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::high_latency
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::low_requests
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - cooldown_policy:
+        type: eu.ict-flame.policies.Update
+        description: Based on a time constraint, shut down Bristol and Manchester
+        triggers:
+          time_trigger:
+            description: Disconnect Nodes
+            condition:
+              constraint: unknown::time_frame
+              period: 60 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.booted
+        properties:
+          parent: high_latency_check
+          scheduling:
+            dt_start:
+              TZID: "W. Europe Standard Time"
+              time: 20180324T000000
+            dt_end:
+              tzid: "W. Europe Standard Time"
+              time: 20180324T050000
+            rrule:
+              freq: weekly
+              rule:
+                byday: [mo, tu, we, th, su]
+
+    - service_paid:
+        type: eu.ict-flame.policies.StateChange
+        description: Check outstanding payments. If there are outstanding payments, we shut down the deployed service.
+        triggers:
+          not_paid_trigger:
+            description: Check if the payment is late
+            condition:
+              constraint: unknown::serviceIsNotPaid
+              period: 3600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.shutdown
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_action:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
diff --git a/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-4.yaml b/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-4.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fe46de4d3bbb0e3d204a17de4d8da831d159f2b2
--- /dev/null
+++ b/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-4.yaml
@@ -0,0 +1,174 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+metadata:
+  template_name: Flame Test Tosca resource specification
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+# Import own definitions of nodes, capabilities and policy syntax.
+imports:
+  - flame_definitions-0.1.7.yaml
+
+# Starting the template
+
+## Topology
+topology_template:
+  node_templates:
+    database:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            mem_size: 4096 MB
+            disk_size: 10 GB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - db.app.ict-flame.eu
+
+
+    frontend:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            disk_size: 10 GB
+            mem_size: 4096 MB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - frontend.app.ict-flame.eu
+          - www.app.ict-flame.eu
+
+  ## Policies
+  policies:
+    - init:
+        type: eu.ict-flame.policies.InitialPolicy
+        description: Start the nodes initially
+        properties:
+          parent: service_paid
+        triggers:
+          inital_trigger:
+            condition:
+              constraint: initialise
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.connected
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::high_latency
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::low_requests
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - missing_measurement_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::missing_storage_measurements
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - cooldown_policy:
+        type: eu.ict-flame.policies.Update
+        description: Based on a time constraint, shut down Bristol and Manchester
+        triggers:
+          time_trigger:
+            description: Disconnect Nodes
+            condition:
+              constraint: unknown::time_frame
+              period: 60 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.booted
+        properties:
+          parent: high_latency_check
+          scheduling:
+            dt_start:
+              TZID: "W. Europe Standard Time"
+              time: 20180324T000000
+            dt_end:
+              tzid: "W. Europe Standard Time"
+              time: 20180324T050000
+            rrule:
+              freq: weekly
+              rule:
+                byday: [mo, tu, we, th, su]
+
+    - service_paid:
+        type: eu.ict-flame.policies.StateChange
+        description: Check outstanding payments. If there are outstanding payments, we shut down the deployed service.
+        triggers:
+          not_paid_trigger:
+            description: Check if the payment is late
+            condition:
+              constraint: unknown::serviceIsNotPaid
+              period: 3600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.shutdown
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_action:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
diff --git a/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-5.yaml b/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-5.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..82a200cc1fa9cc44e285459bbaa1303829c34c8f
--- /dev/null
+++ b/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-5.yaml
@@ -0,0 +1,174 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+metadata:
+  template_name: Flame Test Tosca resource specification
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+# Import own definitions of nodes, capabilities and policy syntax.
+imports:
+  - flame_definitions-0.1.7.yaml
+
+# Starting the template
+
+## Topology
+topology_template:
+  node_templates:
+    database:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            mem_size: 4096 MB
+            disk_size: 10 GB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - db.app.ict-flame.eu
+
+
+    frontend:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            disk_size: 10 GB
+            mem_size: 4096 MB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - frontend.app.ict-flame.eu
+          - www.app.ict-flame.eu
+
+  ## Policies
+  policies:
+    - init:
+        type: eu.ict-flame.policies.InitialPolicy
+        description: Start the nodes initially
+        properties:
+          parent: service_paid
+        triggers:
+          inital_trigger:
+            condition:
+              constraint: initialise
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.connected
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::high_latency
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::low_requests
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - requests_diff_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::increase_in_requests
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - cooldown_policy:
+        type: eu.ict-flame.policies.Update
+        description: Based on a time constraint, shut down Bristol and Manchester
+        triggers:
+          time_trigger:
+            description: Disconnect Nodes
+            condition:
+              constraint: unknown::time_frame
+              period: 60 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.booted
+        properties:
+          parent: high_latency_check
+          scheduling:
+            dt_start:
+              TZID: "W. Europe Standard Time"
+              time: 20180324T000000
+            dt_end:
+              tzid: "W. Europe Standard Time"
+              time: 20180324T050000
+            rrule:
+              freq: weekly
+              rule:
+                byday: [mo, tu, we, th, su]
+
+    - service_paid:
+        type: eu.ict-flame.policies.StateChange
+        description: Check outstanding payments. If there are outstanding payments, we shut down the deployed service.
+        triggers:
+          not_paid_trigger:
+            description: Check if the payment is late
+            condition:
+              constraint: unknown::serviceIsNotPaid
+              period: 3600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.shutdown
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_action:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
diff --git a/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-1.yaml b/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-1.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a5e9ed304db8f77be067a18b0cb4561b85dd8ce7
--- /dev/null
+++ b/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-1.yaml
@@ -0,0 +1,40 @@
+# Fails since it doesn't specify the tosca version or imports, and the topology_template root node is missing.
+
+metadata:
+    sfc: companyA-VR
+    sfci: companyA-VR-premium
+triggers:
+    high_latency:
+      description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
+      event_type: threshold
+      metric: network.latency
+      condition:
+        threshold: 45
+        granularity: 120
+        aggregation_method: mean
+        resource_type:
+          flame_location: watershed
+        comparison_operator: gt
+      action:
+        implementation:
+          - http://sfemc.flame.eu/notify
+          - http://companyA.alert-handler.flame.eu/high-latency
+    low_requests:
+      description: |
+        This event triggers when the last reported number of requests for a given service function
+        falls behind a given threshold.
+      event_type: threshold
+      metric: storage.requests
+      condition:
+        threshold: 5
+        granularity: 60
+        aggregation_method: last
+        resource_type:
+          flame_sfp: storage
+          flame_sf: storage-users
+          flame_location: watershed
+        comparison_operator: lt
+      action:
+        implementation:
+          - http://sfemc.flame.eu/notify
+          - http://companyA.alert-handler.flame.eu/low-requests
diff --git a/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-2.yaml b/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7bd6e44b5b14bb7431630a2549530ee62774d22f
--- /dev/null
+++ b/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-2.yaml
@@ -0,0 +1,51 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+description: TOSCA Alerts Configuration document
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+metadata:
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+topology_template:
+
+  policies:
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          high_latency:
+            description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
+            event_type: threshold
+            metric: network.latency
+            condition:
+              threshold: 45
+              granularity: 120
+              aggregation_method: mean
+              global_tags:  # correct field is called resource_type, not global_tags
+                flame_location: watershed
+              comparison_operator: gt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/high-latency
+    - requests_diff_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          increase_in_requests:
+            description: |
+              This event triggers when the number of requests has increased relative to the number of requests received
+              120 seconds ago.
+            event_type: relative
+            metric: storage.requests
+            condition:
+              threshold: 100
+              granularity: 120
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: gte
+            action:
+              implementation:
+              - http://sfemc.flame.eu/notify
\ No newline at end of file
diff --git a/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-3.yaml b/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-3.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..94891fe31c116e93a49f9fc5718e6842849a0a9c
--- /dev/null
+++ b/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-3.yaml
@@ -0,0 +1,54 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+description: TOSCA Alerts Configuration document
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+metadata:
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+topology_template:
+# missing policies section
+  - high_latency_policy:
+      type: eu.ict-flame.policies.StateChange
+      triggers:
+        high_latency:
+          description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
+          event_type: threshold
+          metric: network.latency
+          condition:
+            threshold: 45
+            granularity: 120
+            aggregation_method: mean
+            resource_type:
+              flame_location: watershed
+              flame_server: watershed
+            comparison_operator: gt
+          action:
+            implementation:
+            - http://sfemc.flame.eu/notify
+            - http://companyA.alert-handler.flame.eu/high-latency
+  - low_requests_policy:
+      type: eu.ict-flame.policies.StateChange
+      triggers:
+        low_requests:
+          description: |
+            This event triggers when the last reported number of requests for a given service function
+            falls behind a given threshold.
+          event_type: threshold
+          metric: storage.requests
+          condition:
+            threshold: 5
+            granularity: 60
+            aggregation_method: last
+            resource_type:
+              flame_sfp: storage
+              flame_sf: storage-users
+              flame_location: watershed
+            comparison_operator: lt
+          action:
+            implementation:
+            - http://sfemc.flame.eu/notify
+            - http://companyA.alert-handler.flame.eu/low-requests
\ No newline at end of file
diff --git a/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-4.yaml b/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-4.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..76b8a0cd04ca08872f104bbb3e3aff9a505b0de6
--- /dev/null
+++ b/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-4.yaml
@@ -0,0 +1,54 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+description: TOSCA Alerts Configuration document
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+metadata:
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+# Wrong section name, must be topology_template, not alerts
+alerts:
+  policies:
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          high_latency:
+            description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
+            event_type: threshold
+            metric: network.latency
+            condition:
+              threshold: 45
+              granularity: 120
+              aggregation_method: mean
+              resource_type:
+                flame_location: watershed
+              comparison_operator: gt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/high-latency
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          low_requests:
+            description: |
+              This event triggers when the last reported number of requests for a given service function
+              falls behind a given threshold.
+            event_type: threshold
+            metric: storage.requests
+            condition:
+              threshold: 5
+              granularity: 60
+              aggregation_method: last
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: lt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/low-requests
\ No newline at end of file
diff --git a/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-1.yaml b/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-1.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..928c25ed767ba906158e52c1649d76f184ae64b2
--- /dev/null
+++ b/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-1.yaml
@@ -0,0 +1,107 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+description: TOSCA Alerts Configuration document
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+metadata:
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+topology_template:
+
+  policies:
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          high_latency:
+            description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
+            event_type: threshold
+            metric: network.latency
+            condition:
+              threshold: 45
+              granularity: 120
+              aggregation_method: mean
+              resource_type:
+                flame_location: watershed
+                flame_server: watershed
+              comparison_operator: gt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/high-latency
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          low_requests:
+            description: |
+              This event triggers when the last reported number of requests for a given service function
+              falls behind a given threshold.
+            event_type: threshold
+            metric: storage.requests
+            condition:
+              threshold: 5
+              granularity: 60
+              aggregation_method: last
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: lt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/low-requests
+    - requests_diff_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          increase_in_requests:
+            description: |
+              This event triggers when the number of requests has increased relative to the number of requests received
+              120 seconds ago.
+            event_type: relative
+            metric: storage.requests
+            condition:
+              threshold: 100
+              granularity: 120
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: gte
+            action:
+              implementation:
+              - http://sfemc.flame.eu/notify
+          decrease_in_requests:
+            description: |
+              This event triggers when the number of requests has decreased relative to the number of requests received
+              120 seconds ago.
+            event_type: relative
+            metric: storage.requests
+            condition:
+              threshold: -100
+              granularity: 120
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: lte
+            action:
+              implementation:
+              - http://sfemc.flame.eu/notify
+    - missing_measurement_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          missing_storage_measurements:
+            description: This event triggers when the number of storage measurements reported falls below the threshold value.
+            event_type: deadman
+            metric: storage.*
+            condition:
+              threshold: 0
+              granularity: 60
+              resource_type:
+                flame_sfp: storage
+            action:
+              implementation:
+              - http://sfemc.flame.eu/notify
diff --git a/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-2.yaml b/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5924c13817555788959d792044500d7e0dfe00bb
--- /dev/null
+++ b/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-2.yaml
@@ -0,0 +1,62 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+description: TOSCA Alerts Configuration document
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+metadata:
+  sfc: companyA-VirtualReality
+  sfci: premium
+
+topology_template:
+
+  policies:
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          high_latency:
+            description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
+            event_type: threshold
+            metric: network.latency
+            condition:
+              threshold: 45
+              granularity: 120
+              aggregation_method: mean
+              comparison_operator: gt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/high-latency
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          low_requests:
+            description: |
+              This event triggers when the last reported number of requests for a given service function
+              falls behind a given threshold.
+            event_type: threshold
+            metric: storage.requests
+            condition:
+              threshold: 5
+              granularity: 60
+              aggregation_method: last
+              comparison_operator: lt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/low-requests
+    - missing_measurement_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          missing_storage_measurements:
+            event_type: deadman
+            metric: storage.field
+            condition:
+              threshold: 0
+              granularity: 60
+              resource_type:
+                flame_sfp: storage
+            action:
+              implementation:
+              - http://sfemc.flame.eu/notify
\ No newline at end of file
diff --git a/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-3.yaml b/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-3.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..118797e8b5c1889b4c81a0cbeadecfd5cd141aeb
--- /dev/null
+++ b/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-3.yaml
@@ -0,0 +1,30 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+description: TOSCA Alerts Configuration document
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+metadata:
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+topology_template:
+
+  policies:
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          high_latency:
+            description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
+            event_type: threshold
+            metric: network.latency
+            condition:
+              threshold: 45
+              granularity: 120
+              aggregation_method: mean
+              comparison_operator: gt
+            action:
+              implementation:
+                - http://sfemc.flame.eu/notify
+                - http://companyA.alert-handler.flame.eu/high-latency
diff --git a/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-4.yaml b/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-4.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..64b19cbe37225546b4e5c73adfb83ee47c9d981d
--- /dev/null
+++ b/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-4.yaml
@@ -0,0 +1,52 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+description: TOSCA Alerts Configuration document
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+metadata:
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+topology_template:
+
+  policies:
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          low_requests:
+            description: |
+              This event triggers when the last reported number of requests for a given service function
+              falls behind a given threshold.
+            event_type: threshold
+            metric: storage.requests
+            condition:
+              threshold: 5
+              granularity: 60
+              aggregation_method: last
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: lt
+            action:
+              implementation:
+                - http://companyA.alert-handler.flame.eu/low-requests
+    - requests_diff_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          increase_in_requests:
+            event_type: relative
+            metric: storage.requests
+            condition:
+              threshold: 100
+              granularity: 120
+              resource_type:
+                flame_sfp: storage
+                flame_sf: storage-users
+                flame_location: watershed
+              comparison_operator: gte
+            action:
+              implementation:
+              - http://sfemc.flame.eu/notify
\ No newline at end of file
diff --git a/src/service/setup.py b/src/service/setup.py
index c3fbf0e617197626ec492eec7553feae7620d31d..5d966f515271706b94fa643595c734e799999e72 100644
--- a/src/service/setup.py
+++ b/src/service/setup.py
@@ -22,20 +22,31 @@
 """
 
 
+# Python standard libs
 import os
 import os.path
 from setuptools import setup, find_packages
 
 
-def read(fname):
-    return open(os.path.join(os.path.dirname(__file__), fname)).read()
+def get_version(*relative_path):
+    """
+    Reads and parses a version file.
 
+    :param relative_path: iterable representing the relative path to the version file
+    :return: the version string read from the file, or "SNAPSHOT" if the file does not exist
+    """
+
+    fname = os.path.join(os.path.dirname(__file__), *relative_path)
 
-def get_version(fname):
     if os.path.isfile(fname):
-      git_revision = read(fname)
+        with open(fname) as f:  # use a context manager when opening files, otherwise the file handle might not be properly closed
+            version = {}
+            # execute the version file and put its content in the version dictionary
+            exec(f.read(), version)
+            # extract the __version__ variable from the dictionary, if not found use default value "SNAPSHOT"
+            git_revision = version.get("__version__", "SNAPSHOT")
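+            # the VERSION file is expected to contain a single assignment such as:  __version__ = "1.2.0"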
     else:
-      git_revision = "SNAPSHOT"
+        git_revision = "SNAPSHOT"
 
     return git_revision
 
@@ -51,7 +62,11 @@ requires = [
     'psycopg2',
     'influxdb',
     'neo4j-driver',
-    'py2neo'
+    'py2neo',
+    'pyyaml',
+    'tosca-parser',
+    'schema',
+    'requests'
 ]
 
 tests_require = [
@@ -61,7 +76,7 @@ tests_require = [
 
 setup(
     name="clmcservice",
-    version=get_version("_version.py"),
+    version=get_version("VERSION"),
     author="Michael Boniface",
     author_email="mjb@it-innovation.soton.ac.uk",
     description="FLAME CLMC Service Module",
@@ -75,7 +90,6 @@ setup(
     extras_require={
         'testing': tests_require,
     },
-    package_data={'': ['_version.py']},
     classifiers=[
         "Development Status :: Alpha",
         "Topic :: FLAME CLMC Service",
diff --git a/src/test/MANIFEST.in b/src/test/MANIFEST.in
index 839a767b00e517e889ce264b9dff90252ddd44bf..e7b77446f3e60c33aa86e5381f66c0c564ade835 100644
--- a/src/test/MANIFEST.in
+++ b/src/test/MANIFEST.in
@@ -1,2 +1,2 @@
-include MANIFEST.in
-recursive-include clmctest _version.py *.yml *.sh *.json *.conf
\ No newline at end of file
+include VERSION
+recursive-include clmctest *.yml *.yaml *.sh *.json *.conf
\ No newline at end of file
diff --git a/src/test/VERSION b/src/test/VERSION
new file mode 100644
index 0000000000000000000000000000000000000000..4a2bfa871aa7cbcb89e5d84bf7020312f591bb5d
--- /dev/null
+++ b/src/test/VERSION
@@ -0,0 +1 @@
+__version__ = "1.2.0"
\ No newline at end of file
diff --git a/src/test/clmctest/alerts/__init__.py b/src/test/clmctest/alerts/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a93a4bf16d8eee8666c6a28c3e40306983804a29
--- /dev/null
+++ b/src/test/clmctest/alerts/__init__.py
@@ -0,0 +1 @@
+#!/usr/bin/python3
diff --git a/src/test/clmctest/alerts/alert_handler_server.py b/src/test/clmctest/alerts/alert_handler_server.py
new file mode 100644
index 0000000000000000000000000000000000000000..514b8b565dc952ea9875e74650013c6eb9dc87e8
--- /dev/null
+++ b/src/test/clmctest/alerts/alert_handler_server.py
@@ -0,0 +1,85 @@
+#!/usr/bin/python3
+"""
+## © University of Southampton IT Innovation Centre, 2018
+##
+## Copyright in this software belongs to University of Southampton
+## IT Innovation Centre of Gamma House, Enterprise Road,
+## Chilworth Science Park, Southampton, SO16 7NS, UK.
+##
+## This software may not be used, sold, licensed, transferred, copied
+## or reproduced in whole or in part in any manner or form or in or
+## on any media by any person other than in accordance with the terms
+## of the Licence Agreement supplied with the software, or otherwise
+## without the prior written consent of the copyright owners.
+##
+## This software is distributed WITHOUT ANY WARRANTY, without even the
+## implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+## PURPOSE, except where stated in the Licence Agreement supplied with
+## the software.
+##
+##      Created By :            Nikolay Stanchev
+##      Created Date :          22-08-2018
+##      Created for Project :   FLAME
+"""
+
+
+from http.server import HTTPServer, BaseHTTPRequestHandler
+from json import loads, dump
+from os.path import join
+
+
+LOG_TEST_FOLDER_PATH = "/var/log/flame/clmc/alerts"
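+# NOTE: this directory is created and cleaned up by the test fixture in clmctest/alerts/conftest.py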
+
+
+class CustomHTTPHandler(BaseHTTPRequestHandler):
+    """
+    An HTTP request handler used in the CLMC alerts integration test.
+    """
+
+    def _set_headers(self):
+        """
+        Sets up headers used to send back a response to a received request
+        """
+
+        self.send_response(200)
+        self.send_header('Content-type', 'application/json')
+        self.end_headers()
+
+    def do_POST(self):
+        """
+        Handles POST requests made to the server - used to process the incoming alert notifications sent by Kapacitor.
+        """
+
+        global LOG_TEST_FOLDER_PATH
+
+        # read post data and message ID - the topic from Kapacitor
+        content_length = int(self.headers['Content-Length'])
+        post_data = self.rfile.read(content_length)
+        post_data = post_data.decode(self.headers.get('Accept-Charset', "utf-8"))
+        post_data = loads(post_data)  # load to json
+        msg_id = post_data["id"]
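+        # NOTE (assumption): besides "id", the Kapacitor HTTP alert handler typically also posts fields such as
+        # "message", "level", "time" and "data"; only the topic id is needed here to name the log file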
+
+        # write data to log file named after the message ID
+        with open(join(LOG_TEST_FOLDER_PATH, "alert-{0}.log".format(msg_id)), "w+") as fh:
+            dump(fp=fh, obj=post_data)
+
+        # send back a response (needed to mimic the behaviour of an actual http server)
+        self._set_headers()
+        self.wfile.write(b"{\"msg\": \"accepted\"}")
+
+
+def run(server_class=HTTPServer, handler_class=BaseHTTPRequestHandler):
+    """
+    Starts the server on port 9999
+
+    :param server_class: defaults to HTTPServer (standard lib)
+    :param handler_class: defaults to the Base HTTP request handler (standard lib)
+    """
+
+    server_address = ('', 9999)
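+    # binding to '' listens on all interfaces, so the handler is reachable from the Kapacitor host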
+    httpd = server_class(server_address, handler_class)
+    httpd.serve_forever()
+
+
+if __name__ == "__main__":
+    run(handler_class=CustomHTTPHandler)  # run server with the custom http handler
diff --git a/src/test/clmctest/alerts/alerts_test_config.yaml b/src/test/clmctest/alerts/alerts_test_config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..54d504e78466a7b324d9b1e1dcfe957edac8a857
--- /dev/null
+++ b/src/test/clmctest/alerts/alerts_test_config.yaml
@@ -0,0 +1,87 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+description: TOSCA Alerts Configuration document
+
+imports:
+- flame_clmc_alerts_definitions.yaml
+
+metadata:
+  sfc: MS_Template_1
+  sfci: MS_I1
+
+topology_template:
+
+  policies:
+    - scale_nginx_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          high_requests:
+            description: |
+              This event triggers when the number of requests for a given service function
+              exceeds a given threshold.
+            event_type: threshold
+            metric: nginx.requests
+            condition:
+              threshold: 5
+              granularity: 5
+              aggregation_method: mean
+              resource_type:
+                flame_location: DC1
+              comparison_operator: gte
+            action:
+              implementation:
+                - http://172.40.231.200:9999/
+          high_cpu_usage:
+            description: This event triggers when the cpu system usage is too high.
+            event_type: threshold
+            metric: cpu.usage_system
+            condition:
+              threshold: 0.2
+              granularity: 10
+              aggregation_method: mean
+              resource_type:
+                flame_location: DC1
+                flame_sfp: nginx
+              comparison_operator: gte
+            action:
+              implementation:
+                - http://172.40.231.200:9999/
+          increase_in_active_requests:
+            description: This event triggers when the number of active requests has increased relative to the number reported 10 seconds ago.
+            event_type: relative
+            metric: nginx.accepts
+            condition:
+              threshold: 5
+              granularity: 10
+              resource_type:
+                flame_sfc: MS_Template_1  # value is already given in metadata so this is optional
+                flame_sfci: MS_I1  # value is already given in metadata so this is optional
+                flame_sfp: nginx
+                flame_sf: adaptive_streaming_nginx_I1
+                flame_location: DC1
+                flame_server: DC1
+              comparison_operator: gte
+            action:
+              implementation:
+                - http://172.40.231.200:9999/
+    - deadman_policy:
+        type: eu.ict-flame.policies.StateChange
+        triggers:
+          no_measurements:
+            description: |
+              This event triggers when RTT measurements are missing for more than 12 seconds.
+            event_type: deadman
+            metric: clcm.rtt
+            condition:
+              threshold: 0
+              granularity: 5
+              resource_type:
+                flame_sfc: MS_Template_1  # value is already given in metadata so this is optional
+                flame_sfci: MS_I1  # value is already given in metadata so this is optional
+                flame_sfp: nginx
+                flame_sf: adaptive_streaming_nginx_I1
+                flame_location: DC1
+                flame_server: DC1
+            action:
+              implementation:
+                - http://172.40.231.200:9999/
\ No newline at end of file
diff --git a/src/test/clmctest/alerts/conftest.py b/src/test/clmctest/alerts/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..73a65b726d0f6af32e18f71d21b90d30e4189b77
--- /dev/null
+++ b/src/test/clmctest/alerts/conftest.py
@@ -0,0 +1,107 @@
+#!/usr/bin/python3
+"""
+## © University of Southampton IT Innovation Centre, 2018
+##
+## Copyright in this software belongs to University of Southampton
+## IT Innovation Centre of Gamma House, Enterprise Road,
+## Chilworth Science Park, Southampton, SO16 7NS, UK.
+##
+## This software may not be used, sold, licensed, transferred, copied
+## or reproduced in whole or in part in any manner or form or in or
+## on any media by any person other than in accordance with the terms
+## of the Licence Agreement supplied with the software, or otherwise
+## without the prior written consent of the copyright owners.
+##
+## This software is distributed WITHOUT ANY WARRANTY, without even the
+## implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+## PURPOSE, except where stated in the Licence Agreement supplied with
+## the software.
+##
+##      Created By :            Nikolay Stanchev
+##      Created Date :          22-08-2018
+##      Created for Project :   FLAME
+"""
+
+from time import sleep
+from pytest import fixture
+from subprocess import Popen, DEVNULL
+from os.path import join, dirname, exists
+from os import kill, makedirs
+from shutil import rmtree
+from signal import SIGKILL
+from json import load
+from pkg_resources import resource_filename
+from requests import delete, get
+from clmctest.alerts.alert_handler_server import LOG_TEST_FOLDER_PATH
+
+
+KAPACITOR_PORT = 9092
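+# 9092 is the default port on which the Kapacitor HTTP API listens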
+
+
+@fixture(scope="module")
+def rspec_config():
+    """
+    Reads the service configuration deployed for the integration tests.
+
+    :return: the python object representing the read JSON file
+    """
+
+    rspec = resource_filename('clmctest', 'rspec.json')
+    print("\nrspec file: {0}".format(rspec))
+
+    with open(rspec, 'r') as stream:
+        data_loaded = load(stream)
+    return data_loaded
+
+
+@fixture(autouse=True, scope="module")
+def set_up_tear_down_fixture(rspec_config):
+    """
+    Set up/tear down fixture for the alerts integration test.
+    """
+
+    global KAPACITOR_PORT
+
+    kapacitor_host = None
+    for host in rspec_config:
+        if host["name"] == "clmc-service":
+            kapacitor_host = host["ip_address"]
+            break
+
+    assert kapacitor_host is not None
+
+    kapacitor_url = "http://{0}:{1}".format(kapacitor_host, KAPACITOR_PORT)
+
+    if exists(LOG_TEST_FOLDER_PATH):
+        rmtree(LOG_TEST_FOLDER_PATH)  # clean out the log directory
+    makedirs(LOG_TEST_FOLDER_PATH)  # create the log directory
+
+    print("\nStarting alert handler HTTP server...")
+    http_server_file = join(dirname(__file__), "alert_handler_server.py")
+    p = Popen(["python3", http_server_file], stdout=DEVNULL, stderr=DEVNULL)
+    process_id = p.pid
+    sleep(1)
+    print("Server started with PID {0}".format(process_id))
+
+    yield
+
+    print("\nKilling process with PID {0}".format(process_id))
+    kill(process_id, SIGKILL)
+    if exists(LOG_TEST_FOLDER_PATH):
+        rmtree(LOG_TEST_FOLDER_PATH)
+
+    print("Deleting Kapacitor tasks, topics and handlers that were created for this test...")
+    # get all tasks from kapacitor (that were created in this test) and delete them
+    kapacitor_tasks = get("{0}/kapacitor/v1/tasks".format(kapacitor_url)).json()["tasks"]
+    kapacitor_task_links = [task["link"]["href"] for task in kapacitor_tasks]
+    for task_link in kapacitor_task_links:
+        delete("{0}{1}".format(kapacitor_url, task_link))
+
+    # get all topics and handlers from kapacitor (that were created in this test) and delete them
+    kapacitor_topics = get("{0}/kapacitor/v1/alerts/topics".format(kapacitor_url)).json()["topics"]
+    for topic in kapacitor_topics:
+        topic_handlers = get("{0}{1}".format(kapacitor_url, topic["handlers-link"]["href"])).json()["handlers"]
+        for handler in topic_handlers:
+            delete("{0}{1}".format(kapacitor_url, handler["link"]["href"]))
+
+        delete("{0}{1}".format(kapacitor_url, topic["link"]["href"]))
diff --git a/src/test/clmctest/alerts/resources_test_config.yaml b/src/test/clmctest/alerts/resources_test_config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..45591f166d254eab8840a276a3897d6210f0cf1b
--- /dev/null
+++ b/src/test/clmctest/alerts/resources_test_config.yaml
@@ -0,0 +1,141 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+metadata:
+  template_name: Flame CLMC Alerts Integration Test
+  sfc: MS_Template_1
+  sfci: MS_I1
+
+
+# Import our own definitions of nodes, capabilities and policy syntax.
+imports:
+  - flame_definitions-0.1.7.yaml
+
+# Starting the template
+
+## Topology
+topology_template:
+  node_templates:
+    database:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            mem_size: 4096 MB
+            disk_size: 10 GB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - db.app.ict-flame.eu
+
+
+    frontend:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            disk_size: 10 GB
+            mem_size: 4096 MB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - frontend.app.ict-flame.eu
+          - www.app.ict-flame.eu
+
+  policies:
+    - init:
+        type: eu.ict-flame.policies.InitialPolicy
+        description: Start the nodes initially
+        properties:
+          parent: service_paid
+        triggers:
+          inital_trigger:
+            condition:
+              constraint: initialise
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.connected
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - scale_nginx_policy:
+        type: eu.ict-flame.policies.StateChange
+        properties:
+          parent: service_paid
+        triggers:
+          tigger_a:
+            condition:
+              constraint: clmc::high_requests
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+          tigger_b:
+            condition:
+              constraint: clmc::high_cpu_usage
+              period: 600 # integer required, unit: seconds
+            action:
+              frontend:
+              - fqdn: frontend.app.ict-flame.eu
+                lifecycle_actions:
+                  Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+          tigger_c:
+            condition:
+              constraint: clmc::increase_in_active_requests
+              period: 600 # integer required, unit: seconds
+            action:
+              frontend:
+              - fqdn: frontend.app.ict-flame.eu
+                lifecycle_actions:
+                  Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - deadman_policy:
+        type: eu.ict-flame.policies.StateChange
+        properties:
+          parent: service_paid
+        triggers:
+          tigger_a:
+            condition:
+              constraint: clmc::no_measurements
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - service_paid:
+        type: eu.ict-flame.policies.StateChange
+        description: Check outstanding payments. If there are outstanding payments, we shut down the deployed service.
+        triggers:
+          not_paid_trigger:
+            description: Check if the payment is late
+            condition:
+              constraint: clmc-user-db::serviceIsNotPaid  # this will be ignored, source must be clmc
+              period: 3600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.shutdown
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_action:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
diff --git a/src/test/clmctest/alerts/test_alerts.py b/src/test/clmctest/alerts/test_alerts.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5535f459f117cbcd5d2b7cc5effc522c6c07360
--- /dev/null
+++ b/src/test/clmctest/alerts/test_alerts.py
@@ -0,0 +1,88 @@
+#!/usr/bin/python3
+"""
+## © University of Southampton IT Innovation Centre, 2018
+##
+## Copyright in this software belongs to University of Southampton
+## IT Innovation Centre of Gamma House, Enterprise Road,
+## Chilworth Science Park, Southampton, SO16 7NS, UK.
+##
+## This software may not be used, sold, licensed, transferred, copied
+## or reproduced in whole or in part in any manner or form or in or
+## on any media by any person other than in accordance with the terms
+## of the Licence Agreement supplied with the software, or otherwise
+## without the prior written consent of the copyright owners.
+##
+## This software is distributed WITHOUT ANY WARRANTY, without even the
+## implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+## PURPOSE, except where stated in the Licence Agreement supplied with
+## the software.
+##
+##      Created By :            Nikolay Stanchev
+##      Created Date :          22-08-2018
+##      Created for Project :   FLAME
+"""
+
+from time import sleep
+from requests import post, get
+from os import listdir
+from os.path import join, dirname
+from clmctest.alerts.alert_handler_server import LOG_TEST_FOLDER_PATH
+
+
+CLMC_SERVICE_PORT = 9080
+NGINX_PORT = 80
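+# ports on which the CLMC service and the nginx test service are exposed in the integration test deployment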
+
+
+class TestAlerts(object):
+
+    def test_alert_triggers(self, rspec_config):
+        """
+        Test is implemented using the following steps:
+            * Send clmc service a TOSCA alert spec. file
+            * Wait 10 seconds for Kapacitor to configure and start executing the defined tasks
+            * Send some test requests to nginx to increase the load
+            * Wait 15 seconds for alerts to be triggered
+            * Check that 4 log files have been created - one for each alert defined in the alert spec.
+
+        :param rspec_config: fixture from conftest.py
+        """
+
+        global CLMC_SERVICE_PORT, NGINX_PORT
+
+        clmc_service_host, nginx_host = None, None
+        for host in rspec_config:
+            if host["name"] == "clmc-service":
+                clmc_service_host = host["ip_address"]
+            elif host["name"] == "nginx":
+                nginx_host = host["ip_address"]
+
+            if clmc_service_host is not None and nginx_host is not None:
+                break
+
+        print("Sending alerts specification to clmc service...")
+        alerts_spec = join(dirname(__file__), "alerts_test_config.yaml")
+        resources_spec = join(dirname(__file__), "resources_test_config.yaml")
+
+        with open(alerts_spec, 'rb') as alerts:
+            with open(resources_spec, 'rb') as resources:
+                files = {'alert-spec': alerts, 'resource-spec': resources}
+                response = post("http://{0}:{1}/alerts".format(clmc_service_host, CLMC_SERVICE_PORT), files=files)
+                assert response.status_code == 200
+                clmc_service_response = response.json()
+                assert "triggers_specification_errors" not in clmc_service_response, "Unexpected error was returned for triggers specification"
+                assert "triggers_action_errors" not in clmc_service_response, "Unexpected error was returned for handlers specification"
+        print("Alert spec sent successfully")
+
+        print("Wait 10 seconds for Kapacitor stream/batch tasks to start working...")
+        sleep(10)
+
+        print("Sending test requests to nginx...")
+        for i in range(40):
+            response = get("http://{0}:{1}/".format(nginx_host, NGINX_PORT))
+            assert response.status_code == 200
+            sleep(0.25)
+
+        print("Wait 15 seconds for Kapacitor to trigger alerts...")
+        sleep(15)
+
+        assert len(listdir(LOG_TEST_FOLDER_PATH)) == 4, "4 log files must have been created - one for each alert defined in the specification."
diff --git a/src/test/clmctest/dashboards/dc_dash.json b/src/test/clmctest/dashboards/dc_dash.json
index 7d98cf5c9422218b10513de1e66564c78450a639..318e467dbacf337964df9d6988420f4e141a6c47 100644
--- a/src/test/clmctest/dashboards/dc_dash.json
+++ b/src/test/clmctest/dashboards/dc_dash.json
@@ -10,7 +10,7 @@
       "name": "Mean %CPU",
       "queries": [
         {
-          "query": "SELECT mean(\"cpu_usage\") AS \"mean_cpu_usage\" FROM \"MSDemo\".\"autogen\".\"procstat\" WHERE time > :dashboardTime: AND \"location\"= :location: GROUP BY time(:interval:) FILL(null)",
+          "query": "SELECT mean(\"cpu_usage\") AS \"mean_cpu_usage\" FROM \"MSDemo\".\"autogen\".\"procstat\" WHERE time > :dashboardTime: AND \"flame_location\"= :location: GROUP BY time(:interval:) FILL(null)",
           "queryConfig": {
             "database": "",
             "measurement": "",
@@ -22,7 +22,7 @@
               "tags": []
             },
             "areTagsAccepted": false,
-            "rawText": "SELECT mean(\"cpu_usage\") AS \"mean_cpu_usage\" FROM \"MSDemo\".\"autogen\".\"procstat\" WHERE time > :dashboardTime: AND \"location\"= :location: GROUP BY time(:interval:) FILL(null)",
+            "rawText": "SELECT mean(\"cpu_usage\") AS \"mean_cpu_usage\" FROM \"MSDemo\".\"autogen\".\"procstat\" WHERE time > :dashboardTime: AND \"flame_location\"= :location: GROUP BY time(:interval:) FILL(null)",
             "range": null,
             "shifts": null
           },
@@ -129,7 +129,7 @@
         "influxql": "SHOW TAG VALUES ON :database: FROM :measurement: WITH KEY=:tagKey:",
         "db": "MSDemo",
         "measurement": "cpu",
-        "tagKey": "location",
+        "tagKey": "flame_location",
         "fieldKey": ""
       },
       "links": {
diff --git a/src/test/clmctest/dashboards/minio_dash.json b/src/test/clmctest/dashboards/minio_dash.json
index 906365095d80ef5a7f9c85d21053a9e95ba4aafb..a92200dd7b0129b8648a6c45694fce2da48c700b 100644
--- a/src/test/clmctest/dashboards/minio_dash.json
+++ b/src/test/clmctest/dashboards/minio_dash.json
@@ -10,7 +10,7 @@
       "name": "minio2: Network RX",
       "queries": [
         {
-          "query": "SELECT derivative(max(\"bytes_recv\")) / 62914560 AS \"RX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"ipendpoint\"=:minio2: GROUP BY time(1m)",
+          "query": "SELECT derivative(max(\"bytes_recv\")) / 62914560 AS \"RX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfe\"=:minio2: GROUP BY time(1m)",
           "queryConfig": {
             "database": "",
             "measurement": "",
@@ -22,7 +22,7 @@
               "tags": []
             },
             "areTagsAccepted": false,
-            "rawText": "SELECT derivative(max(\"bytes_recv\")) / 62914560 AS \"RX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"ipendpoint\"=:minio2: GROUP BY time(1m)",
+            "rawText": "SELECT derivative(max(\"bytes_recv\")) / 62914560 AS \"RX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfe\"=:minio2: GROUP BY time(1m)",
             "range": null,
             "shifts": null
           },
@@ -120,7 +120,7 @@
       "name": "minio1: Network RX",
       "queries": [
         {
-          "query": "SELECT derivative(max(\"bytes_recv\")) / 62914560 AS \"RX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"ipendpoint\"=:minio1: GROUP BY time(1m)",
+          "query": "SELECT derivative(max(\"bytes_recv\")) / 62914560 AS \"RX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfe\"=:minio1: GROUP BY time(1m)",
           "queryConfig": {
             "database": "",
             "measurement": "",
@@ -132,7 +132,7 @@
               "tags": []
             },
             "areTagsAccepted": false,
-            "rawText": "SELECT derivative(max(\"bytes_recv\")) / 62914560 AS \"RX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"ipendpoint\"=:minio1: GROUP BY time(1m)",
+            "rawText": "SELECT derivative(max(\"bytes_recv\")) / 62914560 AS \"RX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfe\"=:minio1: GROUP BY time(1m)",
             "range": null,
             "shifts": null
           },
@@ -230,7 +230,7 @@
       "name": "minio2: Network TX",
       "queries": [
         {
-          "query": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"TX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"ipendpoint\"=:minio2: GROUP BY time(1m)",
+          "query": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"TX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfe\"=:minio2: GROUP BY time(1m)",
           "queryConfig": {
             "database": "",
             "measurement": "",
@@ -242,7 +242,7 @@
               "tags": []
             },
             "areTagsAccepted": false,
-            "rawText": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"TX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"ipendpoint\"=:minio2: GROUP BY time(1m)",
+            "rawText": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"TX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfe\"=:minio2: GROUP BY time(1m)",
             "range": null,
             "shifts": null
           },
@@ -340,7 +340,7 @@
       "name": "minio1: Network TX",
       "queries": [
         {
-          "query": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"TX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"ipendpoint\"=:minio1: GROUP BY time(1m)",
+          "query": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"TX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfe\"=:minio1: GROUP BY time(1m)",
           "queryConfig": {
             "database": "",
             "measurement": "",
@@ -352,7 +352,7 @@
               "tags": []
             },
             "areTagsAccepted": false,
-            "rawText": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"TX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"ipendpoint\"=:minio1: GROUP BY time(1m)",
+            "rawText": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"TX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfe\"=:minio1: GROUP BY time(1m)",
             "range": null,
             "shifts": null
           },
@@ -450,7 +450,7 @@
       "name": "minio2: Response time",
       "queries": [
         {
-          "query": "SELECT 100*last(\"0.001\") / last(\"count\") AS \"0.001\", 100*last(\"0.003\") / last(\"count\") AS \"0.003\", 100*last(\"0.005\") / last(\"count\") AS \"0.005\", 100*last(\"0.1\") / last(\"count\") AS \"0.1\", 100*last(\"0.5\") / last(\"count\") AS \"0.5\", 100*last(\"1\") / last(\"count\") AS \"1\" FROM \"MSDemo\".\"autogen\".\"minio_http_requests_duration_seconds\" WHERE time > :dashboardTime: and \"ipendpoint\"=:minio2: GROUP BY time(:interval:) FILL(null)",
+          "query": "SELECT 100*last(\"0.001\") / last(\"count\") AS \"0.001\", 100*last(\"0.003\") / last(\"count\") AS \"0.003\", 100*last(\"0.005\") / last(\"count\") AS \"0.005\", 100*last(\"0.1\") / last(\"count\") AS \"0.1\", 100*last(\"0.5\") / last(\"count\") AS \"0.5\", 100*last(\"1\") / last(\"count\") AS \"1\" FROM \"MSDemo\".\"autogen\".\"minio_http_requests_duration_seconds\" WHERE time > :dashboardTime: and \"flame_sfe\"=:minio2: GROUP BY time(:interval:) FILL(null)",
           "queryConfig": {
             "database": "",
             "measurement": "",
@@ -462,7 +462,7 @@
               "tags": []
             },
             "areTagsAccepted": false,
-            "rawText": "SELECT 100*last(\"0.001\") / last(\"count\") AS \"0.001\", 100*last(\"0.003\") / last(\"count\") AS \"0.003\", 100*last(\"0.005\") / last(\"count\") AS \"0.005\", 100*last(\"0.1\") / last(\"count\") AS \"0.1\", 100*last(\"0.5\") / last(\"count\") AS \"0.5\", 100*last(\"1\") / last(\"count\") AS \"1\" FROM \"MSDemo\".\"autogen\".\"minio_http_requests_duration_seconds\" WHERE time > :dashboardTime: and \"ipendpoint\"=:minio2: GROUP BY time(:interval:) FILL(null)",
+            "rawText": "SELECT 100*last(\"0.001\") / last(\"count\") AS \"0.001\", 100*last(\"0.003\") / last(\"count\") AS \"0.003\", 100*last(\"0.005\") / last(\"count\") AS \"0.005\", 100*last(\"0.1\") / last(\"count\") AS \"0.1\", 100*last(\"0.5\") / last(\"count\") AS \"0.5\", 100*last(\"1\") / last(\"count\") AS \"1\" FROM \"MSDemo\".\"autogen\".\"minio_http_requests_duration_seconds\" WHERE time > :dashboardTime: and \"flame_sfe\"=:minio2: GROUP BY time(:interval:) FILL(null)",
             "range": null,
             "shifts": null
           },
@@ -560,7 +560,7 @@
       "name": "minio1: Response time",
       "queries": [
         {
-          "query": "SELECT 100*last(\"0.001\") / last(\"count\") AS \"0.001\", 100*last(\"0.003\") / last(\"count\") AS \"0.003\", 100*last(\"0.005\") / last(\"count\") AS \"0.005\", 100*last(\"0.1\") / last(\"count\") AS \"0.1\", 100*last(\"0.5\") / last(\"count\") AS \"0.5\", 100*last(\"1\") / last(\"count\") AS \"1\" FROM \"MSDemo\".\"autogen\".\"minio_http_requests_duration_seconds\" WHERE time > :dashboardTime: and \"ipendpoint\"=:minio1: GROUP BY time(:interval:) FILL(null)",
+          "query": "SELECT 100*last(\"0.001\") / last(\"count\") AS \"0.001\", 100*last(\"0.003\") / last(\"count\") AS \"0.003\", 100*last(\"0.005\") / last(\"count\") AS \"0.005\", 100*last(\"0.1\") / last(\"count\") AS \"0.1\", 100*last(\"0.5\") / last(\"count\") AS \"0.5\", 100*last(\"1\") / last(\"count\") AS \"1\" FROM \"MSDemo\".\"autogen\".\"minio_http_requests_duration_seconds\" WHERE time > :dashboardTime: and \"flame_sfe\"=:minio1: GROUP BY time(:interval:) FILL(null)",
           "queryConfig": {
             "database": "",
             "measurement": "",
@@ -572,7 +572,7 @@
               "tags": []
             },
             "areTagsAccepted": false,
-            "rawText": "SELECT 100*last(\"0.001\") / last(\"count\") AS \"0.001\", 100*last(\"0.003\") / last(\"count\") AS \"0.003\", 100*last(\"0.005\") / last(\"count\") AS \"0.005\", 100*last(\"0.1\") / last(\"count\") AS \"0.1\", 100*last(\"0.5\") / last(\"count\") AS \"0.5\", 100*last(\"1\") / last(\"count\") AS \"1\" FROM \"MSDemo\".\"autogen\".\"minio_http_requests_duration_seconds\" WHERE time > :dashboardTime: and \"ipendpoint\"=:minio1: GROUP BY time(:interval:) FILL(null)",
+            "rawText": "SELECT 100*last(\"0.001\") / last(\"count\") AS \"0.001\", 100*last(\"0.003\") / last(\"count\") AS \"0.003\", 100*last(\"0.005\") / last(\"count\") AS \"0.005\", 100*last(\"0.1\") / last(\"count\") AS \"0.1\", 100*last(\"0.5\") / last(\"count\") AS \"0.5\", 100*last(\"1\") / last(\"count\") AS \"1\" FROM \"MSDemo\".\"autogen\".\"minio_http_requests_duration_seconds\" WHERE time > :dashboardTime: and \"flame_sfe\"=:minio1: GROUP BY time(:interval:) FILL(null)",
             "range": null,
             "shifts": null
           },
@@ -679,7 +679,7 @@
         "influxql": "SHOW TAG VALUES ON :database: FROM :measurement: WITH KEY=:tagKey:",
         "db": "MSDemo",
         "measurement": "cpu",
-        "tagKey": "ipendpoint",
+        "tagKey": "flame_sfe",
         "fieldKey": ""
       },
       "links": {
@@ -702,7 +702,7 @@
         "influxql": "SHOW TAG VALUES ON :database: FROM :measurement: WITH KEY=:tagKey:",
         "db": "MSDemo",
         "measurement": "cpu",
-        "tagKey": "ipendpoint",
+        "tagKey": "flame_sfe",
         "fieldKey": ""
       },
       "links": {
diff --git a/src/test/clmctest/dashboards/nginx_dash.json b/src/test/clmctest/dashboards/nginx_dash.json
index d60572664056e825819ae31a9cdbcface374a481..70a6a3c7035412e24a9f615905c2d0305e05d9f4 100644
--- a/src/test/clmctest/dashboards/nginx_dash.json
+++ b/src/test/clmctest/dashboards/nginx_dash.json
@@ -10,7 +10,7 @@
       "name": "nginx_ep_2: Requests / s",
       "queries": [
         {
-          "query": "SELECT non_negative_derivative(\"requests\") AS \"requests/sec\" FROM \"MSDemo\".\"autogen\".\"nginx\" WHERE time > :dashboardTime: AND \"ipendpoint\"='nginx_1_ep2'",
+          "query": "SELECT non_negative_derivative(\"requests\") AS \"requests/sec\" FROM \"MSDemo\".\"autogen\".\"nginx\" WHERE time > :dashboardTime: AND \"flame_sfe\"='nginx_1_ep2'",
           "queryConfig": {
             "database": "",
             "measurement": "",
@@ -22,7 +22,7 @@
               "tags": []
             },
             "areTagsAccepted": false,
-            "rawText": "SELECT non_negative_derivative(\"requests\") AS \"requests/sec\" FROM \"MSDemo\".\"autogen\".\"nginx\" WHERE time > :dashboardTime: AND \"ipendpoint\"='nginx_1_ep2'",
+            "rawText": "SELECT non_negative_derivative(\"requests\") AS \"requests/sec\" FROM \"MSDemo\".\"autogen\".\"nginx\" WHERE time > :dashboardTime: AND \"flame_sfe\"='nginx_1_ep2'",
             "range": null,
             "shifts": null
           },
@@ -120,7 +120,7 @@
       "name": "nginx_ep_2: Network TX",
       "queries": [
         {
-          "query": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"TX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"ipendpoint\"='nginx_1_ep2' GROUP BY time(1m)",
+          "query": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"TX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfe\"='nginx_1_ep2' GROUP BY time(1m)",
           "queryConfig": {
             "database": "",
             "measurement": "",
@@ -132,7 +132,7 @@
               "tags": []
             },
             "areTagsAccepted": false,
-            "rawText": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"TX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"ipendpoint\"='nginx_1_ep2' GROUP BY time(1m)",
+            "rawText": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"TX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfe\"='nginx_1_ep2' GROUP BY time(1m)",
             "range": null,
             "shifts": null
           },
@@ -230,7 +230,7 @@
       "name": "nginx_ep_2: Network RX",
       "queries": [
         {
-          "query": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"RX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"ipendpoint\"='nginx_1_ep2' GROUP By time(1m) fill(previous)",
+          "query": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"RX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfe\"='nginx_1_ep2' GROUP By time(1m) fill(previous)",
           "queryConfig": {
             "database": "",
             "measurement": "",
@@ -242,7 +242,7 @@
               "tags": []
             },
             "areTagsAccepted": false,
-            "rawText": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"RX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"ipendpoint\"='nginx_1_ep2' GROUP By time(1m) fill(previous)",
+            "rawText": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"RX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfe\"='nginx_1_ep2' GROUP By time(1m) fill(previous)",
             "range": null,
             "shifts": null
           },
@@ -340,7 +340,7 @@
       "name": "nginx_ep_1: Requests / s",
       "queries": [
         {
-          "query": "SELECT non_negative_derivative(\"requests\") AS \"requests/sec\" FROM \"MSDemo\".\"autogen\".\"nginx\" WHERE time > :dashboardTime: AND \"ipendpoint\"='nginx_1_ep1'",
+          "query": "SELECT non_negative_derivative(\"requests\") AS \"requests/sec\" FROM \"MSDemo\".\"autogen\".\"nginx\" WHERE time > :dashboardTime: AND \"flame_sfe\"='nginx_1_ep1'",
           "queryConfig": {
             "database": "",
             "measurement": "",
@@ -352,7 +352,7 @@
               "tags": []
             },
             "areTagsAccepted": false,
-            "rawText": "SELECT non_negative_derivative(\"requests\") AS \"requests/sec\" FROM \"MSDemo\".\"autogen\".\"nginx\" WHERE time > :dashboardTime: AND \"ipendpoint\"='nginx_1_ep1'",
+            "rawText": "SELECT non_negative_derivative(\"requests\") AS \"requests/sec\" FROM \"MSDemo\".\"autogen\".\"nginx\" WHERE time > :dashboardTime: AND \"flame_sfe\"='nginx_1_ep1'",
             "range": null,
             "shifts": null
           },
@@ -450,7 +450,7 @@
       "name": "nginx_ep_1: Network TX",
       "queries": [
         {
-          "query": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"TX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"ipendpoint\"='nginx_1_ep1' GROUP BY time(1m)",
+          "query": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"TX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfe\"='nginx_1_ep1' GROUP BY time(1m)",
           "queryConfig": {
             "database": "",
             "measurement": "",
@@ -462,7 +462,7 @@
               "tags": []
             },
             "areTagsAccepted": false,
-            "rawText": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"TX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"ipendpoint\"='nginx_1_ep1' GROUP BY time(1m)",
+            "rawText": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"TX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfe\"='nginx_1_ep1' GROUP BY time(1m)",
             "range": null,
             "shifts": null
           },
@@ -560,7 +560,7 @@
       "name": "nginx_ep_1: Network RX",
       "queries": [
         {
-          "query": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"RX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"ipendpoint\"='nginx_1_ep1' GROUP By time(1m) fill(previous)",
+          "query": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"RX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfe\"='nginx_1_ep1' GROUP By time(1m) fill(previous)",
           "queryConfig": {
             "database": "",
             "measurement": "",
@@ -572,7 +572,7 @@
               "tags": []
             },
             "areTagsAccepted": false,
-            "rawText": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"RX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"ipendpoint\"='nginx_1_ep1' GROUP By time(1m) fill(previous)",
+            "rawText": "SELECT derivative(max(\"bytes_sent\")) / 62914560 AS \"RX_Mb_per_second\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfe\"='nginx_1_ep1' GROUP By time(1m) fill(previous)",
             "range": null,
             "shifts": null
           },
@@ -670,7 +670,7 @@
       "name": "nginx_ep_2: CPU usage",
       "queries": [
         {
-          "query": "SELECT mean(\"cpu_usage\") AS \"mean_cpu_usage\" FROM \"MSDemo\".\"autogen\".\"procstat\" WHERE time > :dashboardTime: AND \"exe\"='nginx' AND \"ipendpoint\"='nginx_1_ep2' GROUP BY time(:interval:) FILL(null)",
+          "query": "SELECT mean(\"cpu_usage\") AS \"mean_cpu_usage\" FROM \"MSDemo\".\"autogen\".\"procstat\" WHERE time > :dashboardTime: AND \"exe\"='nginx' AND \"flame_sfe\"='nginx_1_ep2' GROUP BY time(:interval:) FILL(null)",
           "queryConfig": {
             "database": "MSDemo",
             "measurement": "procstat",
@@ -693,7 +693,7 @@
               "exe": [
                 "nginx"
               ],
-              "ipendpoint": [
+              "flame_sfe": [
                 "nginx_1_ep2"
               ]
             },
@@ -801,7 +801,7 @@
       "name": "nginx_ep_1: CPU usage",
       "queries": [
         {
-          "query": "SELECT mean(\"cpu_usage\") AS \"mean_cpu_usage\" FROM \"MSDemo\".\"autogen\".\"procstat\" WHERE time > :dashboardTime: AND \"exe\"='nginx' AND \"ipendpoint\"='nginx_1_ep1' GROUP BY time(:interval:) FILL(null)",
+          "query": "SELECT mean(\"cpu_usage\") AS \"mean_cpu_usage\" FROM \"MSDemo\".\"autogen\".\"procstat\" WHERE time > :dashboardTime: AND \"exe\"='nginx' AND \"flame_sfe\"='nginx_1_ep1' GROUP BY time(:interval:) FILL(null)",
           "queryConfig": {
             "database": "MSDemo",
             "measurement": "procstat",
@@ -824,7 +824,7 @@
               "exe": [
                 "nginx"
               ],
-              "ipendpoint": [
+              "flame_sfe": [
                 "nginx_1_ep1"
               ]
             },
diff --git a/src/test/clmctest/dashboards/sf_dash.json b/src/test/clmctest/dashboards/sf_dash.json
index 8430953819de013a3f0eca43cab74c30ad6e883e..a131ad0c75a47609329daebb7d9ad4557016ddaf 100644
--- a/src/test/clmctest/dashboards/sf_dash.json
+++ b/src/test/clmctest/dashboards/sf_dash.json
@@ -10,7 +10,7 @@
       "name": "Average MB/s sent/recv for service-function 2",
       "queries": [
         {
-          "query": "select derivative(total_RX_MB, 1m) / 60 as RX_MB_per_s, derivative(total_TX_MB, 1m) / 60 as TX_MB_per_s from (select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"sf\"=:sf2: GROUP BY time(1m), ipendpoint FILL(null)) group by time(1m)) ",
+          "query": "select derivative(total_RX_MB, 1m) / 60 as RX_MB_per_s, derivative(total_TX_MB, 1m) / 60 as TX_MB_per_s from (select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfp\"=:sf2: GROUP BY time(1m), \"flame_sfe\" FILL(null)) group by time(1m)) ",
           "queryConfig": {
             "database": "",
             "measurement": "",
@@ -22,7 +22,7 @@
               "tags": []
             },
             "areTagsAccepted": false,
-            "rawText": "select derivative(total_RX_MB, 1m) / 60 as RX_MB_per_s, derivative(total_TX_MB, 1m) / 60 as TX_MB_per_s from (select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"sf\"=:sf2: GROUP BY time(1m), ipendpoint FILL(null)) group by time(1m)) ",
+            "rawText": "select derivative(total_RX_MB, 1m) / 60 as RX_MB_per_s, derivative(total_TX_MB, 1m) / 60 as TX_MB_per_s from (select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfp\"=:sf2: GROUP BY time(1m), \"flame_sfe\" FILL(null)) group by time(1m)) ",
             "range": null,
             "shifts": null
           },
@@ -120,7 +120,7 @@
       "name": "Total MB sent/recv for service-function 2",
       "queries": [
         {
-          "query": "select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"sf\"=:sf2: GROUP BY time(1m), ipendpoint FILL(null)) group by time(1m)",
+          "query": "select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfp\"=:sf2: GROUP BY time(1m), \"flame_sfe\" FILL(null)) group by time(1m)",
           "queryConfig": {
             "database": "",
             "measurement": "",
@@ -132,7 +132,7 @@
               "tags": []
             },
             "areTagsAccepted": false,
-            "rawText": "select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"sf\"=:sf2: GROUP BY time(1m), ipendpoint FILL(null)) group by time(1m)",
+            "rawText": "select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfp\"=:sf2: GROUP BY time(1m), \"flame_sfe\" FILL(null)) group by time(1m)",
             "range": null,
             "shifts": null
           },
@@ -230,7 +230,7 @@
       "name": "Average MB/s sent/recv for service-function 1",
       "queries": [
         {
-          "query": "select derivative(total_RX_MB, 1m) / 60 as RX_MB_per_s, derivative(total_TX_MB, 1m) / 60 as TX_MB_per_s from (select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"sf\"=:sf1: GROUP BY time(1m), ipendpoint FILL(null)) group by time(1m)) ",
+          "query": "select derivative(total_RX_MB, 1m) / 60 as RX_MB_per_s, derivative(total_TX_MB, 1m) / 60 as TX_MB_per_s from (select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfp\"=:sf1: GROUP BY time(1m), \"flame_sfe\" FILL(null)) group by time(1m)) ",
           "queryConfig": {
             "database": "",
             "measurement": "",
@@ -242,7 +242,7 @@
               "tags": []
             },
             "areTagsAccepted": false,
-            "rawText": "select derivative(total_RX_MB, 1m) / 60 as RX_MB_per_s, derivative(total_TX_MB, 1m) / 60 as TX_MB_per_s from (select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"sf\"=:sf1: GROUP BY time(1m), ipendpoint FILL(null)) group by time(1m)) ",
+            "rawText": "select derivative(total_RX_MB, 1m) / 60 as RX_MB_per_s, derivative(total_TX_MB, 1m) / 60 as TX_MB_per_s from (select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfp\"=:sf1: GROUP BY time(1m), \"flame_sfe\" FILL(null)) group by time(1m)) ",
             "range": null,
             "shifts": null
           },
@@ -340,7 +340,7 @@
       "name": "Total MB sent/recv for service-function 1",
       "queries": [
         {
-          "query": "select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"sf\"=:sf1: GROUP BY time(1m), ipendpoint FILL(null)) group by time(1m)",
+          "query": "select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfp\"=:sf1: GROUP BY time(1m), \"flame_sfe\" FILL(null)) group by time(1m)",
           "queryConfig": {
             "database": "",
             "measurement": "",
@@ -352,7 +352,7 @@
               "tags": []
             },
             "areTagsAccepted": false,
-            "rawText": "select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"sf\"=:sf1: GROUP BY time(1m), ipendpoint FILL(null)) group by time(1m)",
+            "rawText": "select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"flame_sfp\"=:sf1: GROUP BY time(1m), \"flame_sfe\" FILL(null)) group by time(1m)",
             "range": null,
             "shifts": null
           },
@@ -459,7 +459,7 @@
         "influxql": "SHOW TAG VALUES ON :database: FROM :measurement: WITH KEY=:tagKey:",
         "db": "MSDemo",
         "measurement": "cpu",
-        "tagKey": "sf",
+        "tagKey": "flame_sfp",
         "fieldKey": ""
       },
       "links": {
@@ -482,7 +482,7 @@
         "influxql": "SHOW TAG VALUES ON :database: FROM :measurement: WITH KEY=:tagKey:",
         "db": "MSDemo",
         "measurement": "cpu",
-        "tagKey": "sf",
+        "tagKey": "flame_sfp",
         "fieldKey": ""
       },
       "links": {
diff --git a/src/test/clmctest/inputs/__init__.py b/src/test/clmctest/inputs/__init__.py
index 44f772595799f5fe338534918c95e23e08e80464..a93a4bf16d8eee8666c6a28c3e40306983804a29 100644
--- a/src/test/clmctest/inputs/__init__.py
+++ b/src/test/clmctest/inputs/__init__.py
@@ -1 +1 @@
-#!/usr/bin/python3
\ No newline at end of file
+#!/usr/bin/python3
diff --git a/src/test/clmctest/inputs/conftest.py b/src/test/clmctest/inputs/conftest.py
index 86218af4ab7734cf0efab8b2d439488769100e28..a4b2639906a719831de9eace1bbad4abe8db9400 100644
--- a/src/test/clmctest/inputs/conftest.py
+++ b/src/test/clmctest/inputs/conftest.py
@@ -19,46 +19,50 @@
 ##
 ##      Created By :            Michael Boniface
 ##      Created Date :          02-03-2018
+##      Updated By :            Nikolay Stanchev
+##      Updated Date :          30-08-2018
 ##      Created for Project :   FLAME
 """
 
 import pytest
 import time
-import yaml
 import json
 import pkg_resources
 from influxdb import InfluxDBClient
 
 
 @pytest.fixture(scope="module")
-def telegraf_agent_config(request):
+def telegraf_agent_config():
     """
     Reads the service configuration deployed for the streaming simulation test.
 
-    :param request: access the parameters of the fixture
     :return: the python object representing the read YAML file
     """
+
     rspec = pkg_resources.resource_filename('clmctest', 'rspec.json')
     print("\nrspec file: {0}".format(rspec))
+
     with open(rspec, 'r') as stream:
         data_loaded = json.load(stream)
+
     return data_loaded
 
 
-@pytest.fixture(params=[{'database': 'CLMCMetrics'}], scope='module')
+@pytest.fixture(params=[{'database': 'MS_Template_1'}], scope='module')
 def influxdb(telegraf_agent_config, request):
     """
     Creates an Influx DB client for the CLMC metrics database with an empty database
 
     :param telegraf_agent_config: the fixture returning the yaml configuration
     :param request: access the parameters of the fixture
+
     :return: the created Influx DB client
     """
    
-    db =  InfluxDBClient(host=telegraf_agent_config[0]['ip_address'], port=8086, database=request.param['database'], timeout=10)
+    db = InfluxDBClient(host=telegraf_agent_config[0]['ip_address'], port=8086, database=request.param['database'], timeout=10)
     db.drop_database(request.param['database'])
 
-    # wait 20 seconds for the 1st measurement to arrive from agents before returning
+    # wait 30 seconds for the 1st measurement to arrive from agents before returning
     time.sleep(30)
 
     return db
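For orientation, a minimal usage sketch (an assumption, not part of the patch) of a test consuming the two fixtures above; the measurement name is illustrative:

```python
# Sketch only: the influxdb fixture hands back a client bound to the MS_Template_1
# database after the 30 second warm-up, so measurements reported by the agents
# should already be listed.
def test_cpu_measurement_present(telegraf_agent_config, influxdb):
    result = influxdb.query('SHOW measurements ON "MS_Template_1"')
    names = {p["name"] for p in result.get_points()}
    assert "cpu" in names, "no cpu measurement reported by the telegraf agents"
```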
diff --git a/src/test/clmctest/inputs/test_rspec.py b/src/test/clmctest/inputs/test_rspec.py
index e56bac91e7335a0fcad399dc41f5ac00ae8c1fd5..ba0e06c9b2037a61f65a86bff819abbf91e6c4b6 100644
--- a/src/test/clmctest/inputs/test_rspec.py
+++ b/src/test/clmctest/inputs/test_rspec.py
@@ -19,6 +19,8 @@
 ##
 ##      Created By :            Michael Boniface
 ##      Created Date :          25-02-2018
+##      Updated By :            Nikolay Stanchev
+##      Updated Date :          30-08-2018
 ##      Created for Project :   FLAME
 """
 
diff --git a/src/test/clmctest/inputs/test_telegraf_agents.py b/src/test/clmctest/inputs/test_telegraf_agents.py
index 88238eaf62b6292561125fb92a4f67228da360f6..952eb35f0290c1df184b545842dc56bcfadeed29 100644
--- a/src/test/clmctest/inputs/test_telegraf_agents.py
+++ b/src/test/clmctest/inputs/test_telegraf_agents.py
@@ -19,55 +19,26 @@
 ##
 ##      Created By :            Michael Boniface
 ##      Created Date :          02-03-2018
+##      Updated By :            Nikolay Stanchev
+##      Updated Date :          30-08-2018
 ##      Created for Project :   FLAME
 """
 
 import pytest
-from subprocess import run
-from platform import system
-from influxdb import InfluxDBClient
-
-@pytest.mark.parametrize("service_name", [
-    ('clmc-service'),
-    ('apache'),
-    ('nginx'),
-    ('mongo'),
-    ('host'),
-    ('minio')
-    ])
-def test_service_name(telegraf_agent_config, service_name):
-    assert any(s['name'] == service_name for s in telegraf_agent_config), "{0} not in list of hosts".format(service_name)
-    
-def test_ping(telegraf_agent_config):
-    """
-    Pings each service to test for liveliness
-
-    :param streaming_sim_config: the configuration fixture collected from conftest.py
-    """
-
-    print("\n")  # blank line printed for formatting purposes
-
-    ping_count = 1
-    system_dependent_param = "-n" if system().lower() == "windows" else "-c"
-
-    for service in telegraf_agent_config:
-        command = ["ping", system_dependent_param, str(ping_count), service['ip_address']]
-        assert run(command).returncode == 0, "Service ping test failed for {0} with ip address {1}".format(service['name'], service['ip_address'])
-        print("\nSuccessfully passed ping test for service: {0}\n".format(service['name']))
 
 
 @pytest.mark.parametrize("measurement, query, expected_result", [
-    ('nginx', 'SELECT mean("requests") AS "mean" FROM "CLMCMetrics"."autogen"."nginx"', 0),
-    ('cpu', 'SELECT mean("usage_idle") AS "mean" FROM "CLMCMetrics"."autogen"."cpu"', 0),
-    ('mongodb', 'SELECT mean("net_in_bytes") AS "mean" FROM "CLMCMetrics"."autogen"."mongodb"', 0),
-    ('net', 'SELECT mean("bytes_sent") AS "mean" FROM "CLMCMetrics"."autogen"."net"', 0),
-    ('disk', 'SELECT mean("free") AS "mean" FROM "CLMCMetrics"."autogen"."disk"', 0),
-    ('mem', 'SELECT mean("free") AS "mean" FROM "CLMCMetrics"."autogen"."mem"', 0),
-    ('service_config_state', 'SELECT mean("loaded.active.running_count") AS "mean" FROM "CLMCMetrics"."autogen"."service_config_state" WHERE "resource"=\'nginx.service\'', 0),
+    ('nginx', 'SELECT mean("requests") AS "mean" FROM "MS_Template_1"."autogen"."nginx"', 0),
+    ('cpu', 'SELECT mean("usage_idle") AS "mean" FROM "MS_Template_1"."autogen"."cpu"', 0),
+    ('mongodb', 'SELECT mean("net_in_bytes") AS "mean" FROM "MS_Template_1"."autogen"."mongodb"', 0),
+    ('net', 'SELECT mean("bytes_sent") AS "mean" FROM "MS_Template_1"."autogen"."net"', 0),
+    ('disk', 'SELECT mean("free") AS "mean" FROM "MS_Template_1"."autogen"."disk"', 0),
+    ('mem', 'SELECT mean("free") AS "mean" FROM "MS_Template_1"."autogen"."mem"', 0),
+    ('service_config_state', 'SELECT mean("loaded.active.running_count") AS "mean" FROM "MS_Template_1"."autogen"."service_config_state" WHERE "resource"=\'nginx.service\'', 0),
     # Report MINIO's HTTP request response time (as a rolling difference of the sum total)
-    ('minio_http_requests_duration_seconds', 'SELECT difference(max("sum")) AS "mean" FROM "CLMCMetrics"."autogen"."minio_http_requests_duration_seconds" WHERE time > now() - 1h GROUP BY time(10s)',0),      
+    ('minio_http_requests_duration_seconds', 'SELECT difference(max("sum")) AS "mean" FROM "MS_Template_1"."autogen"."minio_http_requests_duration_seconds" WHERE time > now() - 1h GROUP BY time(10s)', 0),
     # Report the average change in difference of MINIO's HTTP response time (the inner query determines a rolling difference between sampling periods [respTimeDiff])
-    ('minio_http_requests_duration_seconds', 'SELECT mean("respTimeDiff") AS "mean" FROM (SELECT difference(max("sum")) AS "respTimeDiff" FROM "CLMCMetrics"."autogen"."minio_http_requests_duration_seconds" WHERE time > now() - 1h GROUP BY time(10s))',0)              
+    ('minio_http_requests_duration_seconds', 'SELECT mean("respTimeDiff") AS "mean" FROM (SELECT difference(max("sum")) AS "respTimeDiff" FROM "MS_Template_1"."autogen"."minio_http_requests_duration_seconds" WHERE time > now() - 1h GROUP BY time(10s))', 0)
     ])
 def test_all_inputs(influxdb, measurement, query, expected_result):
     """
@@ -79,7 +50,7 @@ def test_all_inputs(influxdb, measurement, query, expected_result):
     :param expected_result: the expected result from the query
     """
 
-    query_result = influxdb.query('SHOW measurements ON "CLMCMetrics"')
+    query_result = influxdb.query('SHOW measurements ON "MS_Template_1"')
     points = list(query_result.get_points())
     assert any(p['name'] == measurement for p in points), "{0} not in measurement list".format(measurement)
     
@@ -89,14 +60,25 @@ def test_all_inputs(influxdb, measurement, query, expected_result):
     assert actual_result > expected_result, "actual result {0} is not > expected result {1} for query {2}".format(actual_result, str(expected_result), query)
 
 
-@pytest.mark.parametrize("query, expected_result", 
-    [('filter query', 0),
-     ('filter query', 0),
-     ('filter query', 0)
-    ])
-def test_global_tag_filtering(influxdb, query, expected_result):
-    """Tests that the global tags are inserted correctly into the global configuration using the install CLMC script
+@pytest.mark.parametrize("query", [
+    'SELECT * FROM "MS_Template_1"."autogen"."nginx" GROUP BY *',
+    'SELECT * FROM "MS_Template_1"."autogen"."cpu" GROUP BY *',
+    'SELECT * FROM "MS_Template_1"."autogen"."mongodb" GROUP BY *',
+    'SELECT * FROM "MS_Template_1"."autogen"."net" GROUP BY *',
+    'SELECT * FROM "MS_Template_1"."autogen"."disk" GROUP BY *',
+    'SELECT * FROM "MS_Template_1"."autogen"."mem" GROUP BY *',
+    'SELECT * FROM "MS_Template_1"."autogen"."service_config_state" WHERE "resource"=\'nginx.service\' GROUP BY *',
+    'SELECT * FROM "MS_Template_1"."autogen"."minio_http_requests_duration_seconds" GROUP BY *'
+])
+def test_global_tag_filtering(influxdb, query):
+    """
+    Tests that the global tags are inserted correctly into the Telegraf global configuration by the CLMC agent install script
+
+    :param influxdb: the influx db client fixture
+    :param query: the query to execute
     """
-    # run query
-    # check result
-    assert 1
+
+    query_result = influxdb.query(query).items()[0]
+    tags = query_result[0][1].keys()
+
+    assert set(tags).issuperset({"flame_sfc", "flame_sfci", "flame_sfp", "flame_sf", "flame_sfe", "flame_server", "flame_location"})
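The superset assertion above leans on the influxdb-python `ResultSet` layout: with `GROUP BY *`, each series is keyed by a `(measurement, tags)` tuple, so the tag names can be read straight from the series key. A minimal sketch (not part of the patch; the host value is illustrative):

```python
# Sketch only: inspect the series key returned for a GROUP BY * query.
from influxdb import InfluxDBClient

client = InfluxDBClient(host="172.40.231.51", port=8086, database="MS_Template_1", timeout=10)

result = client.query('SELECT * FROM "MS_Template_1"."autogen"."cpu" GROUP BY *')
(measurement, tags), points = result.items()[0]  # first series: key is (name, tag dict)

print(measurement)          # 'cpu'
print(sorted(tags.keys()))  # expected to include the flame_* global tags listed in the assertion
```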
diff --git a/src/test/clmctest/monitoring/E2ESim.py b/src/test/clmctest/monitoring/E2ESim.py
deleted file mode 100644
index 4a3fa2d6d4d9768fed96188d571bc34c22ac0aad..0000000000000000000000000000000000000000
--- a/src/test/clmctest/monitoring/E2ESim.py
+++ /dev/null
@@ -1,159 +0,0 @@
-#!/usr/bin/python3
-"""
-## Copyright University of Southampton IT Innovation Centre, 2018
-##
-## Copyright in this software belongs to University of Southampton
-## IT Innovation Centre of Gamma House, Enterprise Road,
-## Chilworth Science Park, Southampton, SO16 7NS, UK.
-##
-## This software may not be used, sold, licensed, transferred, copied
-## or reproduced in whole or in part in any manner or form or in or
-## on any media by any person other than in accordance with the terms
-## of the Licence Agreement supplied with the software, or otherwise
-## without the prior written consent of the copyright owners.
-##
-## This software is distributed WITHOUT ANY WARRANTY, without even the
-## implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-## PURPOSE, except where stated in the Licence Agreement supplied with
-## the software.
-##
-##      Created By :            Michael Boniface
-##      Created Date :          15-04-2018
-##      Updated By :            Nikolay Stanchev
-##      Updated Date :          16-04-2018
-##      Created for Project :   FLAME
-"""
-
-
-from influxdb import InfluxDBClient
-import clmctest.monitoring.LineProtocolGenerator as lp
-import urllib.parse
-import time
-import random
-
-
-class Simulator(object):
-    """
-    Simulator used to generate E2E measurements.
-    """
-
-    DATABASE = 'CLMCMetrics'  # default database name
-    DATABASE_URL = 'http://172.40.231.51:8086'  # default database url
-
-    TICK = 1  # a simulation tick represents 1s
-    SIMULATION_LENGTH = 120  # simulation time in seconds
-
-    def __init__(self, database_url=DATABASE_URL, database=DATABASE):
-        """
-        Initialises the simulator by creating a db client object and resetting the database.
-
-        :param database_url: db url
-        :param database: db name
-        """
-
-        url_object = urllib.parse.urlparse(database_url)
-        self.db_client = InfluxDBClient(host=url_object.hostname, port=url_object.port, database=database, timeout=10)
-
-        self.db_url = database_url
-        self.db_name = database
-
-        self._reset_db()
-
-    def _reset_db(self):
-        """
-        Reset the database using the already initialised db client object.
-        """
-
-        self.db_client.drop_database(self.db_name)
-        self.db_client.create_database(self.db_name)
-
-    def run(self):
-        """
-        Runs the simulation.
-        """
-
-        # all network delays start from 1ms, the dictionary stores the information to report
-        paths = [
-            {
-                'target': 'SR2',
-                'source': 'SR1',
-                'path_id': 'SR1---SR2',
-                'latency': 5,
-                'bandwidth': 100*1024*1024
-            },
-            {
-                'target': 'SR1',
-                'source': 'SR2',
-                'path_id': 'SR1---SR2',
-                'latency': 5,
-                'bandwidth': 100*1024*1024
-            },
-            {
-                'target': 'SR3',
-                'source': 'SR1',
-                'path_id': 'SR1---SR3',
-                'latency': 5,
-                'bandwidth': 100*1024*1024
-            },
-            {
-                'target': 'SR1',
-                'source': 'SR3',
-                'path_id': 'SR1---SR3',
-                'latency': 5,
-                'bandwidth': 100*1024*1024
-            }
-        ]
-
-        service_function_instances = [
-            {
-                'endpoint': 'ms1.flame.org',
-                'sf_instance': 'sr2.ms1.flame.org',  # TODO: what did we decide the sf_instance would look like?
-                'sfr': 'SR2',
-                'service_delay': 40,
-                'cpus': 1
-            },
-            {
-                'endpoint': 'ms1.flame.org',
-                'sf_instance': 'sr3.ms1.flame.org',  # TODO: what did we decide the sf_instance would look like?
-                'sfr': 'SR3',
-                'service_delay': 10,
-                'cpus': 4
-            }
-        ]
-
-        av_request_size = 10 * 1024 * 1024  # average request size measured by service function / Bytes
-        av_response_size = 1 * 1024  # average request size measured by service function / Bytes
-
-        # current time in seconds (to test the aggregation we write influx data points related to future time), so we start from the current time
-        start_time = int(time.time())
-
-        sim_time = start_time
-
-        sample_period_net = 1  # sample period for reporting network delays (measured in seconds)
-        sample_period_media = 5  # sample period for reporting media service delays (measured in seconds)
-
-        for i in range(0, self.SIMULATION_LENGTH):
-            # report one of the network delays every sample_period_net seconds
-            if i % sample_period_net == 0:
-                path = random.choice(paths)
-                self.db_client.write_points(
-                    lp.generate_network_delay_report(path['path_id'], path['source'], path['target'], path['latency'], path['bandwidth'], sim_time))
-
-                # increase/decrease the delay in every sample report (min delay is 1)
-                path['latency'] = max(1, path['latency'] + random.randint(-3, 3))
-
-            # report one of the service_function_instance response times every sample_period_media seconds
-            if i % sample_period_media == 0:
-                service = random.choice(service_function_instances)
-                self.db_client.write_points(lp.generate_service_delay_report(
-                    service['endpoint'], service['sf_instance'], service['sfr'], service['service_delay'], av_request_size, av_response_size, sim_time))
-
-            # increase the time by one simulation tick
-            sim_time += self.TICK
-
-        end_time = sim_time
-        print("Simulation finished. Start time: {0}, End time: {1}".format(start_time, end_time))
-
-
-if __name__ == "__main__":
-    Simulator().run()
diff --git a/src/test/clmctest/monitoring/LineProtocolGenerator.py b/src/test/clmctest/monitoring/LineProtocolGenerator.py
index 20b62120f9ceb9e0bfc44f318025380caebcfb68..3c9c93816dfcdb63b4b76de143d1c49cdf6fec9a 100644
--- a/src/test/clmctest/monitoring/LineProtocolGenerator.py
+++ b/src/test/clmctest/monitoring/LineProtocolGenerator.py
@@ -29,66 +29,6 @@ import uuid
 from random import randint
 
 
-def generate_network_delay_report(path_id, source_sfr, target_sfr, latency, bandwidth, time):
-    """
-    Generates a platform measurement about the network delay between two specific service routers.
-
-    :param path_id: the identifier of the path between the two service routers
-    :param source_sfr: the source service router
-    :param target_sfr: the target service router
-    :param latency: the e2e network delay for traversing the path between the two service routers
-    :param bandwidth: the bandwidth of the path (minimum of bandwidths of the links it is composed of)
-    :param time: the measurement timestamp
-    :return: a list of dict-formatted reports to post on influx
-    """
-
-    result = [{"measurement": "network_delays",
-               "tags": {
-                   "path": path_id,
-                   "source": source_sfr,
-                   "target": target_sfr
-               },
-               "fields": {
-                   "latency": latency,
-                   "bandwidth": bandwidth
-               },
-               "time": _getNSTime(time)
-               }]
-
-    return result
-
-
-def generate_service_delay_report(endpoint, sf_instance, sfr, response_time, request_size, response_size, time):
-    """
-    Generates a service measurement about the media service response time.
-
-    :param endpoint: endpoint of the media component
-    :param sf_instance: service function instance
-    :param sfr: the service function router that connects the endpoint of the SF instance to the FLAME network
-    :param response_time: the media service response time (this is not the response time for the whole round-trip, but only for the processing part of the media service component)
-    :param request_size: the size of the request received by the service in Bytes
-    :param response_size: the size of the response received by the service in Bytes
-    :param time: the measurement timestamp
-    :return: a list of dict-formatted reports to post on influx
-    """
-
-    result = [{"measurement": "service_delays",
-               "tags": {
-                   "endpoint": endpoint,
-                   "sf_instance": sf_instance,
-                   "sfr": sfr
-               },
-               "fields": {
-                   "response_time": response_time,
-                   "request_size": request_size,
-                   "response_size": response_size
-               },
-               "time": _getNSTime(time)
-               }]
-
-    return result
-
-
 # Reports TX and RX, scaling on requested quality
 def generate_network_report(recieved_bytes, sent_bytes, time):
     result = [{"measurement": "net_port_io",
diff --git a/src/test/clmctest/monitoring/__init__.py b/src/test/clmctest/monitoring/__init__.py
index 44f772595799f5fe338534918c95e23e08e80464..a93a4bf16d8eee8666c6a28c3e40306983804a29 100644
--- a/src/test/clmctest/monitoring/__init__.py
+++ b/src/test/clmctest/monitoring/__init__.py
@@ -1 +1 @@
-#!/usr/bin/python3
\ No newline at end of file
+#!/usr/bin/python3
diff --git a/src/test/clmctest/monitoring/conftest.py b/src/test/clmctest/monitoring/conftest.py
index 34457d87e0eaeeac92d0be57d914d77e63d15fb5..ef8cf2b77752191db4b753dca151215ff071cc1e 100644
--- a/src/test/clmctest/monitoring/conftest.py
+++ b/src/test/clmctest/monitoring/conftest.py
@@ -19,6 +19,8 @@
 ##
 ##      Created By :            Michael Boniface
 ##      Created Date :          25-02-2018
+##      Updated By :            Nikolay Stanchev
+##      Updated Date :          30-08-2018
 ##      Created for Project :   FLAME
 """
 
@@ -27,7 +29,6 @@ import json
 import pkg_resources
 from influxdb import InfluxDBClient
 from clmctest.monitoring.StreamingSim import Sim
-from clmctest.monitoring.E2ESim import Simulator
 
 
 @pytest.fixture(scope="module")
@@ -46,7 +47,7 @@ def streaming_sim_config():
     return data_loaded
 
 
-@pytest.fixture(params=[{'database': 'CLMCMetrics'}], scope='module')
+@pytest.fixture(params=[{'database': 'media_service_A'}], scope='module')
 def influx_db(streaming_sim_config, request):
     """
     Creates an Influx DB client for the CLMC metrics database
@@ -70,29 +71,22 @@ def simulator(streaming_sim_config):
 
     influx_url = "http://" + streaming_sim_config[0]['ip_address'] + ":8086"
 
+    agent1_url, agent2_url = None, None
     for service in streaming_sim_config:
         if service['name'] == "ipendpoint1":
-            influx_db_name = service['database_name']
+            influx_db_name = service['sfc_id']
             agent1_url = "http://" + service['ip_address'] + ":8186"
         elif service['name'] == "ipendpoint2":
             agent2_url = "http://" + service['ip_address'] + ":8186"
 
+        if agent1_url is not None and agent2_url is not None:
+            break
+
+    assert agent1_url is not None, "Configuration error for ipendpoint1"
+    assert agent2_url is not None, "Configuration error for ipendpoint2"
+
     simulator = Sim(influx_url, influx_db_name, agent1_url, agent2_url)
 
     simulator.reset()
 
     return simulator
-
-
-@pytest.fixture(scope="module")
-def e2e_simulator(streaming_sim_config):
-    """
-    A fixture to obtain a simulator instance with the configuration parameters.
-
-    :param streaming_sim_config: the configuration object
-    :return: an instance of the E2E simulator
-    """
-
-    influx_url = "http://" + streaming_sim_config[0]['ip_address'] + ":8086"
-
-    return Simulator(database_url=influx_url)
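Spelled out, the simulator fixture above wires together the following endpoints (a minimal sketch, not part of the patch; the addresses mirror the rspec.json entries later in this changeset):

```python
# Sketch only: the values the simulator fixture resolves from rspec.json.
from clmctest.monitoring.StreamingSim import Sim

influx_url = "http://172.40.231.51:8086"    # clmc-service
influx_db_name = "media_service_A"          # sfc_id of ipendpoint1
agent1_url = "http://172.40.231.170:8186"   # ipendpoint1 telegraf agent
agent2_url = "http://172.40.231.171:8186"   # ipendpoint2 telegraf agent

simulator = Sim(influx_url, influx_db_name, agent1_url, agent2_url)
simulator.reset()  # clear previously reported simulation data before the tests run
```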
diff --git a/src/test/clmctest/monitoring/test_e2eresults.py b/src/test/clmctest/monitoring/test_e2eresults.py
deleted file mode 100644
index 9c957d684c677c39539c22570eb328a155a1af16..0000000000000000000000000000000000000000
--- a/src/test/clmctest/monitoring/test_e2eresults.py
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/python3
-"""
-## © University of Southampton IT Innovation Centre, 2018
-##
-## Copyright in this software belongs to University of Southampton
-## IT Innovation Centre of Gamma House, Enterprise Road,
-## Chilworth Science Park, Southampton, SO16 7NS, UK.
-##
-## This software may not be used, sold, licensed, transferred, copied
-## or reproduced in whole or in part in any manner or form or in or
-## on any media by any person other than in accordance with the terms
-## of the Licence Agreement supplied with the software, or otherwise
-## without the prior written consent of the copyright owners.
-##
-## This software is distributed WITHOUT ANY WARRANTY, without even the
-## implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-## PURPOSE, except where stated in the Licence Agreement supplied with
-## the software.
-##
-##      Created By :            Nikolay Stanchev
-##      Created Date :          17-04-2018
-##      Created for Project :   FLAME
-"""
-
-import pytest
-import time
-import requests
-import urllib.parse
-
-
-class TestE2ESimulation(object):
-    """
-    A testing class used to group all the tests related to the E2E simulation data
-    """
-
-    @pytest.fixture(scope='class', autouse=True)
-    def run_simulator(self, e2e_simulator):
-        """
-        A fixture, which runs the simulation before running the tests.
-
-        :param e2e_simulator: the simulator for the end-to-end data
-        """
-
-        # Configure the aggregator through the CLMC service
-        influx_url = urllib.parse.urlparse(e2e_simulator.db_url)
-        aggregator_control_url = "http://{0}:9080/aggregator/control".format(influx_url.hostname)
-        aggregator_config_url = "http://{0}:9080/aggregator/config".format(influx_url.hostname)
-
-        print("Configuring aggregator with request to {0} ...".format(aggregator_config_url))
-        r = requests.put(aggregator_config_url, json={"aggregator_report_period": 5, "aggregator_database_name": e2e_simulator.db_name, "aggregator_database_url": e2e_simulator.db_url})
-        assert r.status_code == 200
-
-        print("Running simulation, please wait...")
-        e2e_simulator.run()
-
-        print("Starting aggregator with request to {0}...".format(aggregator_control_url))
-        r = requests.put(aggregator_control_url, json={"action": "start"})  # start the aggregator through the CLMC service
-        assert r.status_code == 200
-
-        print("Waiting for INFLUX to finish receiving data...")
-        time.sleep(e2e_simulator.SIMULATION_LENGTH)  # wait for data to finish arriving at the INFLUX database
-        print("... simulation data fixture finished")
-
-        print("... stopping aggregator with request to {0}...".format(aggregator_control_url))
-        r = requests.put(aggregator_control_url, json={"action": "stop"})  # stop the aggregator through the CLMC service
-        assert r.status_code == 200
-
-    @pytest.mark.parametrize("query, expected_result, equal_comparison", [
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."network_delays"',
-         {"time": "1970-01-01T00:00:00Z", "count_latency": 120, "count_bandwidth": 120}, True),
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."service_delays"',
-         {"time": "1970-01-01T00:00:00Z", "count_response_time": 24, "count_request_size": 24, "count_response_size": 24}, True),
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."e2e_delays"',
-         {"time": "1970-01-01T00:00:00Z", "count_delay_forward": 40, "count_delay_reverse": 40, "count_delay_service": 40,
-          "count_avg_request_size": 40, "count_avg_response_size": 40, "count_avg_bandwidth": 40}, False),
-        ])
-    def test_simulation(self, influx_db, query, expected_result, equal_comparison):
-        """
-        This is the entry point of the test. This method will be found and executed when the module is ran using pytest
-
-        :param query: the query to execute (value obtained from the pytest parameter decorator)
-        :param expected_result: the result expected from executing the query (value obtained from the pytest parameter decorator)
-        :param influx_db the import db client fixture - imported from contest.py
-        """
-
-        # pytest automatically goes through all queries under test, declared in the parameters decorator
-        print("\n")  # prints a blank line for formatting purposes
-
-        # the raise_errors=False argument is given so that we could actually test that the DB didn't return any errors instead of raising an exception
-        query_result = influx_db.query(query, raise_errors=False)
-
-        # test the error attribute of the result is None, that is no error is returned from executing the DB query
-        assert query_result.error is None, "An error was encountered while executing query {0}.".format(query)
-
-        # get the dictionary of result points; the next() function just gets the first element of the query results generator (we only expect one item in the generator)
-        actual_result = next(query_result.get_points())
-
-        # check if we want to compare for equality or for '>='
-        if equal_comparison:
-            assert expected_result == actual_result, "E2E Simulation test failure"
-        else:
-            for key in expected_result:
-                assert actual_result[key] >= expected_result[key], "E2E Simulation test failure"
diff --git a/src/test/clmctest/monitoring/test_rspec.py b/src/test/clmctest/monitoring/test_rspec.py
index 999b98c4e8ee6c57505e1f2059f194e33b3a19a7..315ebe562b7718b5d587471d472903e6d65597b2 100644
--- a/src/test/clmctest/monitoring/test_rspec.py
+++ b/src/test/clmctest/monitoring/test_rspec.py
@@ -19,6 +19,8 @@
 ##
 ##      Created By :            Michael Boniface
 ##      Created Date :          25-02-2018
+##      Updated By :            Nikolay Stanchev
+##      Updated Date :          30-08-2018
 ##      Created for Project :   FLAME
 """
 
@@ -56,7 +58,10 @@ def test_ping(streaming_sim_config):
     ping_count = 1
     system_dependent_param = "-n" if system().lower() == "windows" else "-c"
 
+    services = {'clmc-service', 'ipendpoint1', 'ipendpoint2'}
+
     for service in streaming_sim_config:
-        command = ["ping", system_dependent_param, str(ping_count), service['ip_address']]
-        assert run(command).returncode == 0, "Service ping test failed for {0} with ip address {1}".format(service['name'], service['ip_address'])
-        print("\nSuccessfully passed ping test for service: {0}\n".format(service['name']))
+        if service["name"] in services:  # test only the scenario specific services
+            command = ["ping", system_dependent_param, str(ping_count), service['ip_address']]
+            assert run(command).returncode == 0, "Service ping test failed for {0} with ip address {1}".format(service['name'], service['ip_address'])
+            print("\nSuccessfully passed ping test for service: {0}\n".format(service['name']))
diff --git a/src/test/clmctest/monitoring/test_simresults.py b/src/test/clmctest/monitoring/test_simresults.py
index 9d8670e7957e417554916f697e4e20af8fcf04be..46feff406a4b14c4d3d6db8fe7639f338bdcbb66 100644
--- a/src/test/clmctest/monitoring/test_simresults.py
+++ b/src/test/clmctest/monitoring/test_simresults.py
@@ -44,71 +44,71 @@ class TestSimulation(object):
         print( "... simulation data fixture finished" )
 
     @pytest.mark.parametrize("query, expected_result", [
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."cpu_usage"',
+        ('SELECT count(*) FROM "media_service_A"."autogen"."cpu_usage"',
          {"time": "1970-01-01T00:00:00Z", "count_cpu_active_time": 7200, "count_cpu_idle_time": 7200, "count_cpu_usage": 7200}),
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."ipendpoint_route"',
+        ('SELECT count(*) FROM "media_service_A"."autogen"."ipendpoint_route"',
          {"time": "1970-01-01T00:00:00Z", "count_http_requests_fqdn_m": 7200, "count_network_fqdn_latency": 7200}),
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."mpegdash_service"',
+        ('SELECT count(*) FROM "media_service_A"."autogen"."mpegdash_service"',
          {"time": "1970-01-01T00:00:00Z", "count_avg_response_time": 7200, "count_peak_response_time": 7200, "count_requests": 7200}),
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."net_port_io"',
+        ('SELECT count(*) FROM "media_service_A"."autogen"."net_port_io"',
          {"time": "1970-01-01T00:00:00Z", "count_RX_BYTES_PORT_M": 7200, "count_TX_BYTES_PORT_M": 7200}),
 
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
+        ('SELECT count(*) FROM "media_service_A"."autogen"."endpoint_config" WHERE "flame_sfe"=\'endpoint1.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "count_current_state_time": 3639, "count_unplaced_sum": 3639, "count_unplaced_mst": 3639, "count_placing_sum": 3639, "count_placing_mst": 3639, "count_placed_sum": 3639, "count_placed_mst": 3639, "count_booting_sum": 3639, "count_booting_mst": 3639, "count_booted_sum": 3639,
           "count_booted_mst": 3639, "count_connecting_sum": 3639, "count_connecting_mst": 3639, "count_connected_sum": 3639, "count_connected_mst": 3639, "count_cpus": 3639, "count_memory": 3639, "count_storage": 3639}),
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
+        ('SELECT count(*) FROM "media_service_A"."autogen"."endpoint_config" WHERE "flame_sfe"=\'endpoint2.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "count_current_state_time": 3639, "count_unplaced_sum": 3639, "count_unplaced_mst": 3639, "count_placing_sum": 3639, "count_placing_mst": 3639, "count_placed_sum": 3639, "count_placed_mst": 3639, "count_booting_sum": 3639, "count_booting_mst": 3639, "count_booted_sum": 3639,
           "count_booted_mst": 3639, "count_connecting_sum": 3639, "count_connecting_mst": 3639, "count_connected_sum": 3639, "count_connected_mst": 3639, "count_cpus": 3639, "count_memory": 3639, "count_storage": 3639}),
 
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
+        ('SELECT count(*) FROM "media_service_A"."autogen"."mpegdash_mc_config" WHERE "flame_sfe"=\'endpoint1.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "count_current_state_time": 3607, "count_running_mst": 3607, "count_running_sum": 3607, "count_starting_mst": 3607, "count_starting_sum": 3607, "count_stopped_mst": 3607, "count_stopped_sum": 3607, "count_stopping_mst": 3607, "count_stopping_sum": 3607}),
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
+        ('SELECT count(*) FROM "media_service_A"."autogen"."mpegdash_mc_config" WHERE "flame_sfe"=\'endpoint2.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "count_current_state_time": 3607, "count_running_mst": 3607, "count_running_sum": 3607, "count_starting_mst": 3607, "count_starting_sum": 3607, "count_stopped_mst": 3607, "count_stopped_sum": 3607, "count_stopping_mst": 3607, "count_stopping_sum": 3607}),
 
-        ('SELECT mean(unplaced_mst) as "unplaced_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE unplaced_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
+        ('SELECT mean(unplaced_mst) as "unplaced_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE unplaced_mst <> 0 and "flame_sfe"=\'endpoint1.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "unplaced_mst": 0.7}),
-        ('SELECT mean(placing_mst) as "placing_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE placing_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
+        ('SELECT mean(placing_mst) as "placing_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE placing_mst <> 0 and "flame_sfe"=\'endpoint1.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "placing_mst": 9.4}),
-        ('SELECT mean(placed_mst) as "placed_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE placed_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
+        ('SELECT mean(placed_mst) as "placed_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE placed_mst <> 0 and "flame_sfe"=\'endpoint1.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "placed_mst": 1.7000000000000002}),
-        ('SELECT mean(booting_mst) as "booting_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE booting_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
+        ('SELECT mean(booting_mst) as "booting_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE booting_mst <> 0 and "flame_sfe"=\'endpoint1.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "booting_mst": 9.6}),
-        ('SELECT mean(booted_mst) as "booted_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE booted_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
+        ('SELECT mean(booted_mst) as "booted_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE booted_mst <> 0 and "flame_sfe"=\'endpoint1.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "booted_mst": 2.1}),
-        ('SELECT mean(connecting_mst) as "connecting_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE connecting_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
+        ('SELECT mean(connecting_mst) as "connecting_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE connecting_mst <> 0 and "flame_sfe"=\'endpoint1.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "connecting_mst":  10.2}),
-        ('SELECT mean(connected_mst) as "connected_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE connected_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
+        ('SELECT mean(connected_mst) as "connected_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE connected_mst <> 0 and "flame_sfe"=\'endpoint1.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "connected_mst": 3605.0}),
-        ('SELECT mean(unplaced_mst) as "unplaced_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE unplaced_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
+        ('SELECT mean(unplaced_mst) as "unplaced_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE unplaced_mst <> 0 and "flame_sfe"=\'endpoint2.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "unplaced_mst": 0.7}),
-        ('SELECT mean(placing_mst) as "placing_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE placing_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
+        ('SELECT mean(placing_mst) as "placing_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE placing_mst <> 0 and "flame_sfe"=\'endpoint2.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "placing_mst": 9.4}),
-        ('SELECT mean(placed_mst) as "placed_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE placed_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
+        ('SELECT mean(placed_mst) as "placed_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE placed_mst <> 0 and "flame_sfe"=\'endpoint2.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "placed_mst": 1.7000000000000002}),
-        ('SELECT mean(booting_mst) as "booting_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE booting_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
+        ('SELECT mean(booting_mst) as "booting_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE booting_mst <> 0 and "flame_sfe"=\'endpoint2.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "booting_mst": 9.6}),
-        ('SELECT mean(booted_mst) as "booted_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE booted_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
+        ('SELECT mean(booted_mst) as "booted_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE booted_mst <> 0 and "flame_sfe"=\'endpoint2.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "booted_mst": 2.1}),
-        ('SELECT mean(connecting_mst) as "connecting_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE connecting_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
+        ('SELECT mean(connecting_mst) as "connecting_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE connecting_mst <> 0 and "flame_sfe"=\'endpoint2.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "connecting_mst":  10.2}),
-        ('SELECT mean(connected_mst) as "connected_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE connected_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
+        ('SELECT mean(connected_mst) as "connected_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE connected_mst <> 0 and "flame_sfe"=\'endpoint2.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "connected_mst": 3605.0}),
 
-        ('SELECT mean(stopped_sum) as "stopped_sum" FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE stopped_sum <> 0',
+        ('SELECT mean(stopped_sum) as "stopped_sum" FROM "media_service_A"."autogen"."mpegdash_mc_config" WHERE stopped_sum <> 0',
          {"time": "1970-01-01T00:00:00Z", "stopped_sum": 0.2}),
-        ('SELECT mean(stopped_mst) as "stopped_mst" FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE stopped_mst <> 0',
+        ('SELECT mean(stopped_mst) as "stopped_mst" FROM "media_service_A"."autogen"."mpegdash_mc_config" WHERE stopped_mst <> 0',
          {"time": "1970-01-01T00:00:00Z", "stopped_mst": 0.2}),
-        ('SELECT mean(starting_sum) as "starting_sum" FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE starting_sum <> 0',
+        ('SELECT mean(starting_sum) as "starting_sum" FROM "media_service_A"."autogen"."mpegdash_mc_config" WHERE starting_sum <> 0',
          {"time": "1970-01-01T00:00:00Z", "starting_sum": 5.5}),
-        ('SELECT mean(starting_mst) as "starting_mst" FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE starting_mst <> 0',
+        ('SELECT mean(starting_mst) as "starting_mst" FROM "media_service_A"."autogen"."mpegdash_mc_config" WHERE starting_mst <> 0',
          {"time": "1970-01-01T00:00:00Z", "starting_mst": 5.5}),
-        ('SELECT mean(running_sum) as "running_sum" FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE running_sum <> 0',
+        ('SELECT mean(running_sum) as "running_sum" FROM "media_service_A"."autogen"."mpegdash_mc_config" WHERE running_sum <> 0',
          {"time": "1970-01-01T00:00:00Z", "running_sum": 3602.1000000000004}),
-        ('SELECT mean(running_mst) as "running_mst" FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE running_mst <> 0',
+        ('SELECT mean(running_mst) as "running_mst" FROM "media_service_A"."autogen"."mpegdash_mc_config" WHERE running_mst <> 0',
          {"time": "1970-01-01T00:00:00Z", "running_mst": 3602.1000000000004}),
-        ('SELECT mean(stopping_sum) as "stopping_sum" FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE stopping_sum <> 0',
+        ('SELECT mean(stopping_sum) as "stopping_sum" FROM "media_service_A"."autogen"."mpegdash_mc_config" WHERE stopping_sum <> 0',
          {"time": "1970-01-01T00:00:00Z", "stopping_sum": 1.1}),
-        ('SELECT mean(stopping_mst) as "stopping_mst" FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE stopping_mst <> 0',
+        ('SELECT mean(stopping_mst) as "stopping_mst" FROM "media_service_A"."autogen"."mpegdash_mc_config" WHERE stopping_mst <> 0',
          {"time": "1970-01-01T00:00:00Z", "stopping_mst": 1.1}),
     ])
     def test_simulation(self, influx_db, query, expected_result):
diff --git a/src/test/clmctest/rspec.json b/src/test/clmctest/rspec.json
index 97d18cd6cb29c5eb1d267c6308ef0efabafe8e62..5cc7a75aa3cda35da6270fe2198d3f0e31c5ff80 100644
--- a/src/test/clmctest/rspec.json
+++ b/src/test/clmctest/rspec.json
@@ -1,14 +1,9 @@
 [{
     "name": "clmc-service",
     "ip_address": "172.40.231.51",
-    "influxdb_url": "http://172.40.231.51:8086",
-    "database_name": "CLMCMetrics",
-    "report_period": "25",  
     "forward_ports": [
-        { "guest": 8086, "host": 8086 },
-        { "guest": 8888, "host": 8888 },
-        { "guest": 9092, "host": 9092 },
-        { "guest": 9080, "host": 9080 }    
+        { "guest": 80, "host": 80 },
+        { "guest": 7687, "host": 7687 }
     ]
 },
 {
@@ -16,26 +11,22 @@
     "ip_address": "172.40.231.150",
     "location": "DC1",
     "sfc_id": "MS_Template_1",
-    "sfc_id_instance": "MS_I1",
-    "sf_id": "apache",
-    "sf_id_instance": "adaptive_streaming_I1",
-    "ipendpoint_id": "adaptive_streaming_I1_apache1",
-    "sr_id": "service_router",
-    "influxdb_url": "http://172.40.231.51:8086",
-    "database_name": "CLMCMetrics"
+    "sfc_instance_id": "MS_I1",
+    "sf_package_id": "apache",
+    "sf_id": "adaptive_streaming_I1",
+    "sf_endpoint_id": "adaptive_streaming_I1_apache1",
+    "influxdb_url": "http://172.40.231.51:8086"
 },
 {
     "name": "nginx",
     "ip_address": "172.40.231.151",
     "location": "DC1",
     "sfc_id": "MS_Template_1",
-    "sfc_id_instance": "MS_I1",
-    "sf_id": "nginx",
-    "sf_id_instance": "adaptive_streaming_nginx_I1",
-    "ipendpoint_id": "adaptive_streaming_nginx_I1_apache1",
-    "sr_id": "service_router",
-    "influxdb_url": "http://172.40.231.51:8086",
-    "database_name": "CLMCMetrics"
+    "sfc_instance_id": "MS_I1",
+    "sf_package_id": "nginx",
+    "sf_id": "adaptive_streaming_nginx_I1",
+    "sf_endpoint_id": "adaptive_streaming_nginx_I1_apache1",
+    "influxdb_url": "http://172.40.231.51:8086"
 },
 {
     "name": "mongo",
@@ -43,13 +34,11 @@
     "ip_address": "172.40.231.152",
     "location": "DC1",
     "sfc_id": "MS_Template_1",
-    "sfc_id_instance": "MS_I1",
-    "sf_id": "mongo",
-    "sf_id_instance": "metadata_database_I1",
-    "ipendpoint_id": "metadata_database_I1_apache1",
-    "sr_id": "service_router",
-    "influxdb_url": "http://172.40.231.51:8086",
-    "database_name": "CLMCMetrics"
+    "sfc_instance_id": "MS_I1",
+    "sf_package_id": "mongo",
+    "sf_id": "metadata_database_I1",
+    "sf_endpoint_id": "metadata_database_I1_apache1",
+    "influxdb_url": "http://172.40.231.51:8086"
 },
 {
     "name": "host",    
@@ -57,52 +46,44 @@
     "ip_address": "172.40.231.154",
     "location": "DC1",
     "sfc_id": "MS_Template_1",
-    "sfc_id_instance": "MS_I1",
-    "sf_id": "host",
-    "sf_id_instance": "adaptive_streaming_I1",
-    "ipendpoint_id": "adaptive_streaming_I1_apache1",
-    "sr_id": "service_router",
-    "influxdb_url": "http://172.40.231.51:8086",
-    "database_name": "CLMCMetrics"
+    "sfc_instance_id": "MS_I1",
+    "sf_package_id": "host",
+    "sf_id": "adaptive_streaming_I1",
+    "sf_endpoint_id": "adaptive_streaming_I1_apache1",
+    "influxdb_url": "http://172.40.231.51:8086"
 },
 {
     "name": "minio",    
     "ip_address": "172.40.231.155",
     "location": "DC1",
     "sfc_id": "MS_Template_1",
-    "sfc_id_instance": "MS_I1",
-    "sf_id": "minio",
-    "sf_id_instance": "adaptive_streaming_I1",
-    "ipendpoint_id": "adaptive_streaming_I1_minio",
-    "sr_id": "service_router",
-    "influxdb_url": "http://172.40.231.51:8086",
-    "database_name": "CLMCMetrics"
+    "sfc_instance_id": "MS_I1",
+    "sf_package_id": "minio",
+    "sf_id": "adaptive_streaming_I1",
+    "sf_endpoint_id": "adaptive_streaming_I1_minio",
+    "influxdb_url": "http://172.40.231.51:8086"
 },
 {
     "name": "ipendpoint1",
     "ip_address": "172.40.231.170",
     "location": "nova",
     "sfc_id": "media_service_A",
-    "sfc_id_instance": "StackID",
-    "sf_id": "ipendpoint",
-    "sf_id_instance": "ms-A.ict-flame.eu",
-    "ipendpoint_id": "endpoint1.ms-A.ict-flame.eu",
-    "sr_id": "service_router",
-    "influxdb_url": "http://172.40.231.51:8086",
-    "database_name": "CLMCMetrics"
+    "sfc_instance_id": "StackID",
+    "sf_package_id": "ipendpoint",
+    "sf_id": "ms-A.ict-flame.eu",
+    "sf_endpoint_id": "endpoint1.ms-A.ict-flame.eu",
+    "influxdb_url": "http://172.40.231.51:8086"
 },
 {
     "name": "ipendpoint2", 
     "ip_address": "172.40.231.171",
     "location": "nova",
     "sfc_id": "media_service_A",
-    "sfc_id_instance": "StackID",
-    "sf_id": "ipendpoint",
-    "sf_id_instance": "ms-A.ict-flame.eu",
-    "ipendpoint_id": "endpoint2.ms-A.ict-flame.eu",
-    "sr_id": "service_router",
-    "influxdb_url": "http://172.40.231.51:8086",
-    "database_name": "CLMCMetrics"
+    "sfc_instance_id": "StackID",
+    "sf_package_id": "ipendpoint",
+    "sf_id": "ms-A.ict-flame.eu",
+    "sf_endpoint_id": "endpoint2.ms-A.ict-flame.eu",
+    "influxdb_url": "http://172.40.231.51:8086"
 },
 {
     "name": "test-runner",
diff --git a/src/test/clmctest/scripts/__init__.py b/src/test/clmctest/scripts/__init__.py
index 44f772595799f5fe338534918c95e23e08e80464..a93a4bf16d8eee8666c6a28c3e40306983804a29 100644
--- a/src/test/clmctest/scripts/__init__.py
+++ b/src/test/clmctest/scripts/__init__.py
@@ -1 +1 @@
-#!/usr/bin/python3
\ No newline at end of file
+#!/usr/bin/python3
diff --git a/src/test/clmctest/scripts/test_config_telegraf.py b/src/test/clmctest/scripts/test_config_telegraf.py
index acd8866fc62f7fac51f8c6bf0f844b032d98c6f3..ca23772481d256e5b3974319c649ee3becbe1240 100644
--- a/src/test/clmctest/scripts/test_config_telegraf.py
+++ b/src/test/clmctest/scripts/test_config_telegraf.py
@@ -19,37 +19,38 @@
 ##
 ##      Created By :            Michael Boniface
 ##      Created Date :          20-03-2018
+##      Updated By :            Nikolay Stanchev
+##      Updated Date :          30-08-2018
 ##      Created for Project :   FLAME
 """
 
-import pytest
 import subprocess
 
+
 def test_write_telegraf_conf():
+    # test telegraf monitoring configuration
+    TELEGRAF_CONF_DIR = "/etc/telegraf"
+    LOCATION = "DC1"
+    SFC_ID = "media_service_A"
+    SFC_INSTANCE_ID = "media_service_A_instance"
+    SF_PACKAGE_ID = "streaming_service"
+    SF_ID = "streaming_service_instance"
+    SF_ENDPOINT_ID = "endpoint"
+    INFLUXDB_URL = "http://172.29.236.10"
+    DATABASE_NAME = SFC_ID
 
-  # test telegraf monitoring configuration
-  TELEGRAF_CONF_DIR="/etc/telegraf"
-  LOCATION="DC1"
-  SFC_ID="media_service_A"
-  SFC_ID_INSTANCE="media_service_A_instance"
-  SF_ID="streaming_service"
-  SF_ID_INSTANCE="streaming_service_instance"
-  IP_ENDPOINT_ID="endpoint"
-  SR_ID="service_router"  
-  INFLUXDB_URL="http://172.29.236.10"
-  DATABASE_NAME="experimentation_database"  
-
-  try:
     # mk telegraf conf directory
+    cmd = 'sudo mkdir -p /etc/telegraf'
+    (out, err, code) = run_command(cmd)
+    assert code == 0, "Failed to create telegraf conf dir : " + str(code) + ", cmd=" + cmd
 
-    (out, err, code) = run_command('sudo mkdir -p /etc/telegraf')
-    assert code == 0, "Failed to create telegraf conf dir : " + str(code) + ", cmd=" + cmd    
-
-    (out, err, code) = run_command('sudo mkdir -p /etc/telegraf/telegraf.d')
-    assert code == 0, "Failed to create telegraf include dir : " + str(code) + ", cmd=" + cmd  
+    cmd = 'sudo mkdir -p /etc/telegraf/telegraf.d'
+    (out, err, code) = run_command(cmd)
+    assert code == 0, "Failed to create telegraf include dir : " + str(code) + ", cmd=" + cmd
 
-    # run write config template  script with no telegraf.d directory
-    (out, err, code) = run_command('sudo cp /vagrant/scripts/clmc-agent/telegraf.conf /etc/telegraf/')
+    # run write config template script with no telegraf.d directory
+    cmd = 'sudo cp /vagrant/scripts/clmc-agent/telegraf.conf /etc/telegraf/'
+    (out, err, code) = run_command(cmd)
     assert code == 0, "Failed to copy telegraf.conf : " + str(code) + ", cmd=" + cmd
 
     cmd = 'sudo cp /vagrant/scripts/clmc-agent/telegraf_output.conf /etc/telegraf/telegraf.d/'
@@ -57,56 +58,50 @@ def test_write_telegraf_conf():
     assert code == 0, "Failed to copy telegraf_output.conf : " + str(code) + ", cmd=" + cmd
 
     # run template replacement script with incorrect arguments
-    cmd = 'sudo /vagrant/scripts/clmc-agent/configure.sh' 
+    cmd = 'sudo /vagrant/scripts/clmc-agent/configure.sh'
     (out, err, code) = run_command(cmd)
-    assert code == 1, "Failed to return error on incorrect arguments : " + str(code) + ", cmd=" + cmd  
+    assert code == 1, "Failed to return error on incorrect arguments : " + str(code) + ", cmd=" + cmd
 
     # run template replacement script with all arguments
-    cmd = 'sudo /vagrant/scripts/clmc-agent/configure.sh ' + LOCATION + ' ' + SFC_ID + ' ' + SFC_ID_INSTANCE + ' ' + SF_ID + ' ' + SF_ID_INSTANCE + ' ' + IP_ENDPOINT_ID + ' ' + SR_ID + ' ' + INFLUXDB_URL + ' ' + DATABASE_NAME
+    cmd = 'sudo /vagrant/scripts/clmc-agent/configure.sh ' + LOCATION + ' ' + SFC_ID + ' ' + SFC_INSTANCE_ID + ' ' + SF_PACKAGE_ID + ' ' + SF_ID + ' ' + SF_ENDPOINT_ID + ' ' + INFLUXDB_URL
     (out, err, code) = run_command(cmd)
     assert code == 0, "Configure command returned error, output=" + str(out) + ", cmd=" + cmd
 
     # check that replacement was correct in telegraf.conf
-    try:        
-        TELEGRAF_GENERAL_CONF_FILE = TELEGRAF_CONF_DIR + "/telegraf.conf"
+    TELEGRAF_GENERAL_CONF_FILE = TELEGRAF_CONF_DIR + "/telegraf.conf"
+    try:
         with open(TELEGRAF_GENERAL_CONF_FILE) as general_conf:
-          lines = general_conf.read()          
-          assert lines.find(LOCATION), "Cannot find location" 
-          assert lines.find(SFC_ID), "Cannot find sfc_id"
-          assert lines.find(SFC_ID_INSTANCE), "Cannot find sfc_id_instance"  
-          assert lines.find(SF_ID), "Cannot find sfc_id"            
-          assert lines.find(SF_ID_INSTANCE), "Cannot find sf_id_instance"
-          assert lines.find(IP_ENDPOINT_ID), "Cannot find endpoint"      
-          assert lines.find(SR_ID), "Cannot find sr_id"                                
+            lines = general_conf.read()
+            assert lines.find(LOCATION) != -1, "Cannot find location"
+            assert lines.find(SFC_ID) != -1, "Cannot find sfc_id"
+            assert lines.find(SFC_INSTANCE_ID) != -1, "Cannot find sfc_instance_id"
+            assert lines.find(SF_PACKAGE_ID) != -1, "Cannot find sf_package_id"
+            assert lines.find(SF_ID) != -1, "Cannot find sf_id"
+            assert lines.find(SF_ENDPOINT_ID) != -1, "Cannot find sf_endpoint_id"
     except FileNotFoundError:
         assert False, "Telegraf general conf file not found, " + TELEGRAF_GENERAL_CONF_FILE
 
     # check that replacement was correct in telegraf_output.conf
+    TELEGRAF_OUTPUT_CONF_FILE = TELEGRAF_CONF_DIR + "/telegraf.d/telegraf_output.conf"
     try:
-        TELEGRAF_OUTPUT_CONF_FILE = TELEGRAF_CONF_DIR + "/telegraf.d/telegraf_output.conf"
         with open(TELEGRAF_OUTPUT_CONF_FILE) as output_conf:
-          lines = output_conf.read()
-          assert lines.find(INFLUXDB_URL), "Cannot find influx_db" 
-          assert lines.find(DATABASE_NAME), "Cannot find database"                    
+            lines = output_conf.read()
+            assert lines.find(INFLUXDB_URL) != -1, "Cannot find influxdb url"
+            assert lines.find(DATABASE_NAME) != -1, "Cannot find database"
     except FileNotFoundError:
         assert False, "Telegraf output conf file not found, " + TELEGRAF_OUTPUT_CONF_FILE
 
-  finally:
-      # clean up telegraf after test
-#      run_command("sudo rm -rf /etc/telegraf")
-       print("final")
-# wrapper for executing commands on the cli, returning (std_out, std_error, process_return_code)
+
 def run_command(cmd):
-    """Run a shell command.
+    """
+    Run a shell command. Wrapper for executing commands on the cli, returning (std_out, std_error, process_return_code)
 
-    Arguments:
-        cmd {string} -- command to run in the shell
+    :param cmd: command to run in the shell (string)
 
-    Returns:
-        stdout, stderr, exit code -- tuple of the process's stdout, stderr and exit code (0 on success)
+    :return: stdout, stderr, exit code -- tuple of the process's stdout, stderr and exit code (0 on success)
     """
+
     proc = subprocess.Popen([cmd], stdout=subprocess.PIPE, shell=True)
     out, err = proc.communicate()
     return_code = proc.returncode
     return out, err, return_code
-
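The rewritten test drives configure.sh with a reduced, reordered argument list (location, sfc_id, sfc_instance_id, sf_package_id, sf_id, sf_endpoint_id, influxdb_url) and derives the Telegraf database name from the sfc_id. The sketch below restates that invocation using the run_command helper defined in the same file; the values mirror the test constants and the script path assumes the Vagrant mount used elsewhere in this patch.

```python
# Sketch of the configure.sh contract exercised by the test above: seven positional
# arguments, with the InfluxDB database now derived from the sfc_id rather than
# passed explicitly.
cmd = ("sudo /vagrant/scripts/clmc-agent/configure.sh "
       "DC1 media_service_A media_service_A_instance "
       "streaming_service streaming_service_instance endpoint "
       "http://172.29.236.10")
out, err, code = run_command(cmd)

assert code == 0, "configure.sh should accept exactly seven arguments"
# err is always None here because run_command only pipes stdout
```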
diff --git a/src/test/setup.py b/src/test/setup.py
index 78c16282f64d6cbfd25cc32ed19cbe13849ed05e..7f03b5ef45e8e099d41094b1597b253473ae8ec7 100644
--- a/src/test/setup.py
+++ b/src/test/setup.py
@@ -27,32 +27,41 @@ import os.path
 from setuptools import setup, find_packages
 
 
-def read(fname):
-    return open(os.path.join(os.path.dirname(__file__), fname)).read()
+def get_version(*relative_path):
+    """
+    Reads and parses a version file.
 
+    :param relative_path: iterable representing the relative path to the version file
+    :return: the version string parsed from the file, or "SNAPSHOT" if the file does not exist
+    """
+
+    fname = os.path.join(os.path.dirname(__file__), *relative_path)
 
-def get_version(fname):
     if os.path.isfile(fname):
-      git_revision = read(fname)
+        with open(fname) as f:  # Use context managers when opening files, otherwise file handles might not be properly closed
+            version = {}
+            # execute the version file and put its content in the version dictionary
+            exec(f.read(), version)
+            # extract the __version__ variable from the dictionary, if not found use default value "SNAPSHOT"
+            git_revision = version.get("__version__", "SNAPSHOT")
     else:
-      git_revision = "SNAPSHOT"
+        git_revision = "SNAPSHOT"
 
     return git_revision
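+
+# The VERSION file consumed above is expected to be a small Python snippet that
+# defines __version__, e.g. (hypothetical value):
+#
+#     __version__ = "1.0.0"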
 
 
 setup(
-    name = "clmctest",
-    version = get_version("_version.py"),
-    author = "Michael Boniface",
-    author_email = "mjb@it-innovation.soton.ac.uk",
-    description = "FLAME CLMC Test Module",
-    license = "https://gitlab.it-innovation.soton.ac.uk/FLAME/flame-clmc/blob/integration/LICENSE",
-    keywords = "FLAME CLMC",
+    name="clmctest",
+    version=get_version("VERSION"),
+    author="Michael Boniface",
+    author_email="mjb@it-innovation.soton.ac.uk",
+    description="FLAME CLMC Test Module",
+    license="https://gitlab.it-innovation.soton.ac.uk/FLAME/flame-clmc/blob/integration/LICENSE",
+    keywords="FLAME CLMC",
     url='https://gitlab.it-innovation.soton.ac.uk/FLAME/flame-clmc',
-    packages=find_packages(exclude=["services"]),
+    packages=find_packages(),
     include_package_data=True,
-    package_data={'': ['_version.py', '*.yml', '*.sh', '*.json', '*.conf']},        
-    long_description="FLAME CLMC",
+    long_description="FLAME CLMC Test",
     classifiers=[
         "Development Status :: Alpha",
         "Topic :: FLAME Tests",