diff --git a/scripts/clmc-agent/configure.sh b/scripts/clmc-agent/configure.sh
index 9f98a51303b1e363a9dbf125c6c775742cf82849..13a87bf9e794a017c062c74a29718459219afab0 100755
--- a/scripts/clmc-agent/configure.sh
+++ b/scripts/clmc-agent/configure.sh
@@ -20,6 +20,8 @@
 #//
 #//      Created By :            Michael Boniface
 #//      Created Date :          13/12/2017
+#//      Updated By :            Nikolay Stanchev
+#//      Updated Date :          30/08/2018
 #//      Created for Project :   FLAME
 #//
 #/////////////////////////////////////////////////////////////////////////
@@ -30,21 +32,21 @@ set -euo pipefail
 echo "Configuring Telegraf agent general and output configuration"
 
 # Get command line parameters
-if [ "$#" -ne 9 ]; then
+if [ "$#" -ne 7 ]; then
     echo "Error: illegal number of arguments: "$#
-    echo "Usage: configure.sh LOCATION SFC_ID SFC_ID_INSTANCE SF_ID SF_ID_INSTANCE IP_ENDPOINT_ID SR_ID INFLUXDB_URL DATABASE_NAME"
+    echo "Usage: configure.sh LOCATION SFC_ID SFC_INSTANCE_ID SF_PACKAGE_ID SF_ID SF_ENDPOINT_ID INFLUXDB_URL"
     exit 1 
 fi
 
 LOCATION=$1
+SERVER=${LOCATION}
 SFC_ID=$2
-SFC_ID_INSTANCE=$3
-SF_ID=$4
-SF_ID_INSTANCE=$5
-IP_ENDPOINT_ID=$6
-SR_ID=$7
-INFLUXDB_URL=$8
-DATABASE_NAME=$9
+SFC_INSTANCE_ID=$3
+SF_PACKAGE_ID=$4
+SF_ID=$5
+SF_ENDPOINT_ID=$6
+INFLUXDB_URL=$7
+DATABASE_NAME=${SFC_ID}  # DATABASE IS NAMED AFTER SFC ID
 
 TELEGRAF_CONF_DIR="/etc/telegraf"
 TELEGRAF_CONF_FILE=${TELEGRAF_CONF_DIR}"/telegraf.conf"
@@ -56,11 +58,10 @@ TELEGRAF_OUTPUT_CONF_FILE=${TELEGRAF_INCLUDE_CONF_DIR}"/telegraf_output.conf"
 # Replace template parameters on general configuration
 sed -i 's/${LOCATION}/'${LOCATION}'/g' ${TELEGRAF_CONF_FILE}
 sed -i 's/${SFC_ID}/'${SFC_ID}'/g' ${TELEGRAF_CONF_FILE}
-sed -i 's/${SFC_ID_INSTANCE}/'${SFC_ID_INSTANCE}'/g' ${TELEGRAF_CONF_FILE}
+sed -i 's/${SFC_INSTANCE_ID}/'${SFC_INSTANCE_ID}'/g' ${TELEGRAF_CONF_FILE}
+sed -i 's/${SF_PACKAGE_ID}/'${SF_PACKAGE_ID}'/g' ${TELEGRAF_CONF_FILE}
 sed -i 's/${SF_ID}/'${SF_ID}'/g' ${TELEGRAF_CONF_FILE}
-sed -i 's/${SF_ID_INSTANCE}/'${SF_ID_INSTANCE}'/g' ${TELEGRAF_CONF_FILE}
-sed -i 's/${IP_ENDPOINT_ID}/'${IP_ENDPOINT_ID}'/g' ${TELEGRAF_CONF_FILE}
-sed -i 's/${SR_ID}/'${SR_ID}'/g' ${TELEGRAF_CONF_FILE}
+sed -i 's/${SF_ENDPOINT_ID}/'${SF_ENDPOINT_ID}'/g' ${TELEGRAF_CONF_FILE}
 
 echo "Telegraf Output Configuration File: ${TELEGRAF_OUTPUT_CONF_FILE}"
 
diff --git a/scripts/clmc-agent/telegraf.conf b/scripts/clmc-agent/telegraf.conf
index 07ac73a4dcf6a067a099ce629d9fc4f81bfb1ed2..29ca0ae01cadfacc183613fad22b90d54a9c74c7 100644
--- a/scripts/clmc-agent/telegraf.conf
+++ b/scripts/clmc-agent/telegraf.conf
@@ -17,6 +17,8 @@
 ##
 ##      Created By :            Simon Crowle
 ##      Created Date :          03-01-2018
+##      Updated By :            Nikolay Stanchev
+##      Updated Date :          30-08-2018
 ##      Created for Project :   FLAME
 
 # Telegraf configuration
@@ -34,18 +36,18 @@
 [global_tags]
   # location of the data centre
   location="${LOCATION}"
-  # media service template id
+  # hostname of cluster/server (potentially virtualized) that boots up service function containers - currently filled from the same LOCATION template parameter as the location tag
+  server = "${LOCATION}"
+  # media service template id (defined in the TOSCA resource spec.)
   sfc="${SFC_ID}"
-  # media service instance
-  sfc_i="${SFC_ID_INSTANCE}"
-  # service function type
+  # media service instance id (defined in the TOSCA resource spec.)
+  sfci="${SFC_INSTANCE_ID}"
+  # service function package id (assigned at packaging time)
+  sfp="${SF_PACKAGE_ID}"
+  # service function id (defined in the TOSCA resource spec.)
   sf="${SF_ID}"
-  # service function instance id
-  sf_i="${SF_ID_INSTANCE}"
-  # ipendpoint id aka surrogate instance
-  ipendpoint="${IP_ENDPOINT_ID}"
-  # the service router providing access to the network
-  sr="${SR_ID}"
+  # service function IP endpoint id aka surrogate instance
+  sfe="${SF_ENDPOINT_ID}"
 
 # Configuration for telegraf agent
 [agent]
diff --git a/scripts/clmc-agent/telegraf_output.conf b/scripts/clmc-agent/telegraf_output.conf
index 7521ac34a05a0c8f7ccdc166a772fbd2d0ee5544..734d93c965bb0919de37da55f4f06c3cad6398c1 100644
--- a/scripts/clmc-agent/telegraf_output.conf
+++ b/scripts/clmc-agent/telegraf_output.conf
@@ -17,6 +17,8 @@
 ##
 ##      Created By :            Michael Boniface
 ##      Created Date :          08-03-2018
+##      Updated By :            Nikolay Stanchev
+##      Updated Date :          30-08-2018
 ##      Created for Project :   FLAME
 
 ###############################################################################
@@ -31,7 +33,7 @@
   # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
   urls = ["${INFLUXDB_URL}"] # required
   # The target database for metrics (telegraf will create it if not exists)
-  database = "${DATABASE_NAME}" # required
+  database = "${DATABASE_NAME}" # required - MUST BE NAMED AFTER THE SERVICE FUNCTION CHAIN ID
   # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
   # note: using second precision greatly helps InfluxDB compression
   precision = "s"
diff --git a/scripts/clmc-service/install-clmc-service.sh b/scripts/clmc-service/install-clmc-service.sh
index 75a6453bbfb59af31ad783d215f090a25ab6b489..36877c4d44d7d04a8f59ca53f986461cbe83f3dc 100755
--- a/scripts/clmc-service/install-clmc-service.sh
+++ b/scripts/clmc-service/install-clmc-service.sh
@@ -1,16 +1,5 @@
 #!/bin/bash
 
-# Get command line parameters
-if [ "$#" -ne 3 ]; then
-    echo "Error: illegal number of arguments: "$#
-    echo "Usage: install-clmc-service.sh INFLUX_URL DATABASE_NAME REPORT_PERIOD"
-    exit 1 
-fi
-
-INFLUX_URL=$1
-DATABASE_NAME=$2
-REPORT_PERIOD=$3
-
 apt-get update
 apt-get install libssl-dev -y
 
@@ -141,13 +130,3 @@ do
   echo "Waiting for clmc service port 9080 to be ready on localhost..."
   sleep 5
 done
-
-# configure the CLMC service
-JSON="{\"aggregator_report_period\": ${REPORT_PERIOD}, \"aggregator_database_name\": \"${DATABASE_NAME}\", \"aggregator_database_url\": \"${INFLUX_URL}\"}"
-echo "CONFIG JSON=${JSON}"
-curl -H 'Content-Type: application/json' -X PUT -d "${JSON}" http://localhost:9080/aggregator/config
-
-# start the aggregator
-JSON="{\"action\": \"start\"}"
-echo "START ACTION JSON=${JSON}"
-curl -H 'Content-Type: application/json' -X PUT -d "${JSON}" http://localhost:9080/aggregator/control
\ No newline at end of file
diff --git a/scripts/clmc-service/install.sh b/scripts/clmc-service/install.sh
index 420dddab39d17444c001114ee0427b7167e4b60d..90039438a70ee2c502a3d335f9174f80be87cdf9 100755
--- a/scripts/clmc-service/install.sh
+++ b/scripts/clmc-service/install.sh
@@ -27,18 +27,11 @@
 # Force fail on command fail (off for now as virtualenvwrapper install fails)
 set -euo pipefail
 
-# Get command line parameters
-if [ "$#" -ne 3 ]; then
-    echo "Error: illegal number of arguments: "$#
-    echo "Usage: install.sh INFLUX_URL DATABASE_NAME REPORT_PERIOD"
-    exit 1 
-fi
-
 # Ensure everything runs in directory of the parent script
 cd `dirname $0`
 
 echo "Provisioning CLMC service"
 
-./install-tick-stack.sh $@
-./install-neo4j.sh $@
-./install-clmc-service.sh $@
\ No newline at end of file
+./install-tick-stack.sh
+./install-neo4j.sh
+./install-clmc-service.sh
\ No newline at end of file
diff --git a/scripts/test/fixture.sh b/scripts/test/fixture.sh
index 0aa2f4c0d55663007a87daa01e5f8fd7a5783057..725e21d8e58a2df1f129c3c7e0a606d595d0d4ac 100755
--- a/scripts/test/fixture.sh
+++ b/scripts/test/fixture.sh
@@ -50,10 +50,7 @@ create() {
         # provision software into each container
         echo "Provisioning: ${service_name}"
         if [ ${service_name} == "clmc-service" ]; then
-            influxdb_url=$(echo $SERVICE | jq -r '.influxdb_url')
-            database_name=$(echo $SERVICE | jq -r '.database_name')
-            report_period=$(echo $SERVICE | jq -r '.report_period')
-            cmd="/vagrant/scripts/clmc-service/install.sh ${influxdb_url} ${database_name} ${report_period}"
+            cmd="/vagrant/scripts/clmc-service/install.sh"
             echo "Provisioning command ${cmd}"
             lxc exec ${service_name} --env REPO_ROOT="/vagrant" -- ${cmd}
             exit_code=$?
@@ -73,21 +70,19 @@ create() {
         else
             # get container parameters
             location=$(echo $SERVICE | jq -r '.location')
+            sf_package_id=$(echo $SERVICE | jq -r '.sf_package_id')
             sf_id=$(echo $SERVICE | jq -r '.sf_id')
-            sf_id_instance=$(echo $SERVICE | jq -r '.sf_id_instance')
             sfc_id=$(echo $SERVICE | jq -r '.sfc_id')
-            sfc_id_instance=$(echo $SERVICE | jq -r '.sfc_id_instance')
-            sr_id=$(echo $SERVICE | jq -r '.sr_id')
-            ipendpoint_id=$(echo $SERVICE | jq -r '.ipendpoint_id')
+            sfc_instance_id=$(echo $SERVICE | jq -r '.sfc_instance_id')
+            sf_endpoint_id=$(echo $SERVICE | jq -r '.sf_endpoint_id')
             influxdb_url=$(echo $SERVICE | jq -r '.influxdb_url')
-            database_name=$(echo $SERVICE | jq -r '.database_name')
 
             # install service function specific software
-            cmd=/vagrant/src/test/clmctest/services/${sf_id}/install.sh
+            cmd=/vagrant/src/test/clmctest/services/${sf_package_id}/install.sh
             lxc exec ${service_name} --env REPO_ROOT="/vagrant" -- ${cmd}
             exit_code=$?
             if [ $exit_code != 0 ]; then
-                echo "${sf_id} installation failed with exit code ${exit_code}"
+                echo "${sf_package_id} installation failed with exit code ${exit_code}"
                 exit 1
             fi
             # install telegraf
@@ -109,11 +104,11 @@ create() {
             # copy the 'host' config into all service containers
             cp -f ${repo_root}/src/test/clmctest/services/host/telegraf*.conf ${container_dir}/etc/telegraf/telegraf.d/
             # copy the service-specific config
-            cp -f ${repo_root}/src/test/clmctest/services/${sf_id}/telegraf*.conf ${container_dir}/etc/telegraf/telegraf.d/
+            cp -f ${repo_root}/src/test/clmctest/services/${sf_package_id}/telegraf*.conf ${container_dir}/etc/telegraf/telegraf.d/
             chown -R 100000:100000 ${container_dir}/etc/telegraf/
 
             # replace telegraf template with container parameters
-            cmd="/vagrant/scripts/clmc-agent/configure.sh ${location} ${sfc_id} ${sfc_id_instance} ${sf_id} ${sf_id_instance} ${ipendpoint_id} ${sr_id} ${influxdb_url} ${database_name}"
+            cmd="/vagrant/scripts/clmc-agent/configure.sh ${location} ${sfc_id} ${sfc_instance_id} ${sf_package_id} ${sf_id} ${sf_endpoint_id} ${influxdb_url}"
             lxc exec ${service_name} -- ${cmd}
 
             # start telegraf
diff --git a/src/service/clmcservice/__init__.py b/src/service/clmcservice/__init__.py
index b34c2c9fdbef516b960c22745dcf33360935a34b..b710578a689fb7066c777e3143f8883c49e05bf1 100644
--- a/src/service/clmcservice/__init__.py
+++ b/src/service/clmcservice/__init__.py
@@ -33,7 +33,6 @@ from sqlalchemy import engine_from_config
 
 # CLMC-service imports
 from clmcservice.models.meta import DBSession, Base
-from clmcservice.aggregationapi.utilities import validate_conf_file, MALFORMED_FLAG, CONF_FILE_ATTRIBUTE, CONF_OBJECT
 
 
 ROOT_DIR = dirname(abspath(__file__))  # get the path of the root package (clmcservice) as a global variable
@@ -48,12 +47,6 @@ def main(global_config, **settings):
     DBSession.configure(bind=engine)  # bind the engine to a DB session
     Base.metadata.bind = engine  # bind the engine to the Base class metadata
 
-    # validate and use (if valid) the configuration file for the aggregator
-    conf_file_path = settings[CONF_FILE_ATTRIBUTE]
-    conf = validate_conf_file(conf_file_path)  # if None returned here, service is in unconfigured state
-    settings[CONF_OBJECT] = conf
-    settings[MALFORMED_FLAG] = False
-
     settings['influx_port'] = int(settings['influx_port'])  # the influx port setting must be converted to integer instead of a string
     settings['kapacitor_port'] = int(settings['kapacitor_port'])  # the kapacitor port setting must be converted to integer instead of a string
 
@@ -64,10 +57,6 @@ def main(global_config, **settings):
 
     config = Configurator(settings=settings)
 
-    # add routes of the aggregator API
-    config.add_route('aggregator_config', '/aggregator/config')
-    config.add_route('aggregator_controller', '/aggregator/control')
-
     # add routes of the WHOAMI API
     config.add_route('whoami_endpoints', '/whoami/endpoints')
     config.add_route('whoami_endpoints_instance', 'whoami/endpoints/instance')
diff --git a/src/service/clmcservice/aggregation/__init__.py b/src/service/clmcservice/aggregation/__init__.py
deleted file mode 100644
index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/aggregation/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/src/service/clmcservice/aggregation/aggregator.py b/src/service/clmcservice/aggregation/aggregator.py
deleted file mode 100644
index 1199562d9b72240bdbeb9d9d29f0ee99b6a19f1c..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/aggregation/aggregator.py
+++ /dev/null
@@ -1,279 +0,0 @@
-#!/usr/bin/python3
-"""
-## © University of Southampton IT Innovation Centre, 2018
-##
-## Copyright in this software belongs to University of Southampton
-## IT Innovation Centre of Gamma House, Enterprise Road,
-## Chilworth Science Park, Southampton, SO16 7NS, UK.
-##
-## This software may not be used, sold, licensed, transferred, copied
-## or reproduced in whole or in part in any manner or form or in or
-## on any media by any person other than in accordance with the terms
-## of the Licence Agreement supplied with the software, or otherwise
-## without the prior written consent of the copyright owners.
-##
-## This software is distributed WITHOUT ANY WARRANTY, without even the
-## implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-## PURPOSE, except where stated in the Licence Agreement supplied with
-## the software.
-##
-##      Created By :            Nikolay Stanchev
-##      Created Date :          25-04-2018
-##      Created for Project :   FLAME
-"""
-
-from threading import Thread, Event
-from influxdb import InfluxDBClient
-from time import time, sleep
-from urllib.parse import urlparse
-from clmcservice.aggregationapi.utilities import generate_e2e_delay_report
-import getopt
-import logging
-
-
-class Aggregator(object):
-    """
-    A class used to perform the aggregation feature of the CLMC - aggregating network and media service measurements. Implemented as a separate process.
-    """
-
-    REPORT_PERIOD = 5  # default report period is 5s, that is every 5 seconds the mean delay values for the last 5 seconds are aggregated
-    DATABASE = 'CLMCMetrics'  # default database the aggregator uses
-    DATABASE_URL = 'http://172.40.231.51:8086'  # default database URL the aggregator uses
-
-    def __init__(self, database_name=DATABASE, database_url=DATABASE_URL, report_period=REPORT_PERIOD, logger=None):
-        """
-        Constructs an Aggregator instance.
-
-        :param database_name: database name to use
-        :param database_url: database url to use
-        :param report_period: the report period in seconds
-        """
-
-        if logger is None:
-            self.log = logging.getLogger(__name__)
-        else:
-            self.log = logger
-
-        self.log.info("Connecting to Influx database {0} with URL {1}".format(database_name, database_url))
-        # initialise a database client using the database url and the database name
-        url_object = urlparse(database_url)
-        self.db_client = InfluxDBClient(host=url_object.hostname, port=url_object.port, database=database_name, timeout=10)
-        self.log.info("Successfully connected to Influx database {0} with URL {1}".format(database_name, database_url))
-
-        self.db_url = database_url
-        self.db_name = database_name
-        self.report_period = report_period
-
-        # a cache-like dictionaries to store the last reported values, which can be used to fill in missing values
-        self.network_cache = {}
-        self.service_cache = {}
-
-        # a stop flag event object used to handle the stopping of the process
-        self._stop_flag = Event()
-
-    def stop(self):
-        """
-        Stop the aggregator from running.
-        """
-
-        self.log.info("Aggregator's stop flag has been set.")
-        self._stop_flag.set()
-
-    def run(self):
-        """
-        Performs the functionality of the aggregator - query data from both measurements merge that data and post it back in influx every 5 seconds.
-        """
-
-        self.log.info("Aggregator started running.")
-
-        current_time = int(time())
-        while not self._stop_flag.is_set():
-            self.log.info("Trying to generate an E2E measurement.")
-
-            boundary_time = current_time - self.report_period
-
-            boundary_time_nano = boundary_time * 1000000000
-            current_time_nano = current_time * 1000000000
-
-            # query the network delays and group them by path ID
-            network_delays = {}
-            result = self.db_client.query(
-                'SELECT mean(latency) as "net_latency", mean(bandwidth) as "net_bandwidth" FROM "{0}"."autogen"."network_delays" WHERE time >= {1} and time < {2} GROUP BY path, source, target'.format(
-                    self.db_name, boundary_time_nano, current_time_nano))
-
-            for item in result.items():
-                metadata, result_points = item
-                # measurement = metadata[0]
-                tags = metadata[1]
-
-                result_point = next(result_points)
-                network_delays[(tags['path'], tags['source'], tags['target'])] = result_point['net_latency'], result_point['net_bandwidth']
-                self.network_cache[(tags['path'], tags['source'], tags['target'])] = result_point['net_latency'], result_point['net_bandwidth']
-
-            # query the service delays and group them by endpoint, service function instance and sfr
-            service_delays = {}
-            result = self.db_client.query(
-                'SELECT mean(response_time) as "response_time", mean(request_size) as "request_size", mean(response_size) as "response_size" FROM "{0}"."autogen"."service_delays" WHERE time >= {1} and time < {2} GROUP BY endpoint, sf_instance, sfr'.format(
-                    self.db_name, boundary_time_nano, current_time_nano))
-
-            for item in result.items():
-                metadata, result_points = item
-                # measurement = metadata[0]
-                tags = metadata[1]
-                result_point = next(result_points)
-                service_delays[tags['sfr']] = (result_point['response_time'], result_point['request_size'], result_point['response_size'], tags['endpoint'], tags['sf_instance'])
-                self.service_cache[tags['sfr']] = (result_point['response_time'], result_point['request_size'], result_point['response_size'], tags['endpoint'], tags['sf_instance'])
-
-            # for each network path check if there is a media service delay report for the target sfr - if so, generate an e2e_delay measurement
-            for path in network_delays:
-                path_id, source, target = path
-
-                # check if we have a reverse path without a forward path for a potential aggregated row - e.g. SR3 to SR1 network row with service on SR3 and no row from SR1 to SR3
-                if (source in service_delays or source in self.service_cache) and (path_id, target, source) not in network_delays and (path_id, target, source) in self.network_cache:
-                    # hence search for the forward path in the cache
-                    forward_path = self.network_cache.get((path_id, target, source))
-                    reverse_path = network_delays.get((path_id, source, target))
-                    forward_delay = forward_path[0]
-                    avg_bandwidth = forward_path[1]
-                    reverse_delay = reverse_path[0]
-                    service_delay = service_delays.get(source, self.service_cache.get(source))
-                    response_time, request_size, response_size, endpoint, sf_instance = service_delay
-                    self.db_client.write_points(
-                        generate_e2e_delay_report(path_id, target, source, endpoint, sf_instance, forward_delay, reverse_delay, response_time,
-                                                  request_size, response_size, avg_bandwidth, boundary_time))
-
-                # check if target sfr is reported in service delays, in other words - if there is a media service instance being connected to target sfr
-                if target not in service_delays and target not in self.service_cache:
-                    # if not continue with the other network path reports
-                    continue
-
-                e2e_arguments = {"path_ID": None, "source_SFR": None, "target_SFR": None, "endpoint": None, "sf_instance": None, "delay_forward": None, "delay_reverse": None,
-                                 "delay_service": None, "avg_request_size": None, "avg_response_size": None, "avg_bandwidth": None, "time": boundary_time}
-
-                e2e_arguments['path_ID'] = path_id
-                e2e_arguments['source_SFR'] = source
-                e2e_arguments['target_SFR'] = target
-                e2e_arguments['delay_forward'] = network_delays[path][0]
-                e2e_arguments['avg_bandwidth'] = network_delays[path][1]
-
-                # reverse the path ID to get the network delay for the reversed path
-                reversed_path = (path_id, target, source)
-                if reversed_path in network_delays or reversed_path in self.network_cache:
-                    # get the reverse delay, use the latest value if reported or the cache value
-                    e2e_arguments['delay_reverse'] = network_delays.get(reversed_path, self.network_cache.get(reversed_path))[0]
-                else:
-                    e2e_arguments['delay_reverse'] = None
-
-                # get the response time of the media component connected to the target SFR
-                service_delay = service_delays.get(target, self.service_cache.get(target))
-                response_time, request_size, response_size, endpoint, sf_instance = service_delay
-                # put these points in the e2e arguments dictionary
-                e2e_arguments['delay_service'] = response_time
-                e2e_arguments['avg_request_size'] = request_size
-                e2e_arguments['avg_response_size'] = response_size
-                e2e_arguments['endpoint'] = endpoint
-                e2e_arguments['sf_instance'] = sf_instance
-
-                # if all the arguments of the e2e delay measurements were reported, then generate and post to Influx an E2E measurement row
-                if None not in e2e_arguments.values():
-                    self.db_client.write_points(
-                        generate_e2e_delay_report(e2e_arguments['path_ID'], e2e_arguments['source_SFR'], e2e_arguments['target_SFR'], e2e_arguments['endpoint'],
-                                                  e2e_arguments['sf_instance'], e2e_arguments['delay_forward'], e2e_arguments['delay_reverse'],
-                                                  e2e_arguments['delay_service'],
-                                                  e2e_arguments["avg_request_size"], e2e_arguments['avg_response_size'], e2e_arguments['avg_bandwidth'],
-                                                  e2e_arguments['time']))
-                    self.log.info("Successfully generated an E2E measurement and posted back to Influx.")
-                else:
-                    self.log.info("Couldn't generate an E2E measurement although some of the data could be fetched.")
-
-            old_timestamp = current_time
-            # wait until {report_period) seconds have passed
-            while current_time < old_timestamp + self.report_period:
-                sleep(1)
-                current_time = int(time())
-
-        self.log.info("Aggregator stopped running.")
-
-
-class AggregatorThread(Thread):
-    """
-    A utility class used to wrap around the Aggregator class and return a Thread instance, which can then be used for testing (provides start and stop methods)
-    """
-
-    REPORT_PERIOD = 5  # currently, report period is 5s, that is every 5 seconds the mean delay values for the last 5 seconds are aggregated
-    DATABASE = 'CLMCMetrics'  # default database the aggregator uses
-    DATABASE_URL = 'http://172.40.231.51:8086'  # default database URL the aggregator uses
-
-    def __init__(self, database=DATABASE, database_url=DATABASE_URL, report_period=REPORT_PERIOD):
-        """
-        Constructs an Aggregator instance.
-
-        :param database: database name to use
-        :param database_url: database url to use
-        """
-
-        super(AggregatorThread, self).__init__()  # call the constructor of the thread
-
-        self.aggregator = Aggregator(database_name=database, database_url=database_url, report_period=report_period)
-
-    def stop(self):
-        """
-        A method used to stop the thread.
-        """
-
-        self.aggregator.stop()
-
-    def run(self):
-        """
-        The method to execute when the thread starts.
-        """
-
-        self.aggregator.run()
-
-
-if __name__ == '__main__':
-    # initialise a file logger, only when module's main method is run (NOT when aggregator class is imported somewhere else)
-    log = logging.getLogger('aggregator')
-    hdlr = logging.FileHandler('/var/log/flame/clmc/aggregator.log', mode='a')
-    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
-    hdlr.setFormatter(formatter)
-    log.addHandler(hdlr)
-    log.setLevel(logging.DEBUG)
-
-    # log all errors that are thrown in the execution of the aggregator with the logger object initialized above
-    import sys
-    import traceback
-
-    def report_error(error_type, error_value, error_traceback, log_object=log):
-        log_object.error("Uncaught error thrown!")
-        log_object.error("Error type: {0}".format(error_type))
-        log_object.error("Error value: {0}".format(error_value))
-        log_object.debug("Error traceback:")
-        for trace in traceback.format_tb(error_traceback):
-            log_object.debug(trace)
-
-    sys.excepthook = report_error
-
-    # Parse command line options
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], "p:d:u:", ['period=', 'database=', 'url='])
-
-        arg_period = Aggregator.REPORT_PERIOD
-        arg_database_name = Aggregator.DATABASE
-        arg_database_url = Aggregator.DATABASE_URL
-
-        # Apply parameters if given
-        for opt, arg in opts:
-            if opt in ('-p', '--period'):
-                arg_period = int(arg)
-            elif opt in ('-d', '--database'):
-                arg_database_name = arg
-            elif opt in ('-u', '--url'):
-                arg_database_url = arg
-
-        Aggregator(database_name=arg_database_name, database_url=arg_database_url, report_period=arg_period, logger=log).run()
-
-    # log.info the error messages in case of a parse error
-    except getopt.GetoptError as err:
-        log.info(err)
-        log.info('Parse error; run the script using the following format: python aggregator.py -p <seconds> -d <database name> -u <database url>')
diff --git a/src/service/clmcservice/aggregation/influx_data_interface.py b/src/service/clmcservice/aggregation/influx_data_interface.py
deleted file mode 100644
index c6781d0c35d7d77c68932499591e9e67968c637d..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/aggregation/influx_data_interface.py
+++ /dev/null
@@ -1,241 +0,0 @@
-#!/usr/bin/python3
-"""
-## © University of Southampton IT Innovation Centre, 2018
-##
-## Copyright in this software belongs to University of Southampton
-## IT Innovation Centre of Gamma House, Enterprise Road,
-## Chilworth Science Park, Southampton, SO16 7NS, UK.
-##
-## This software may not be used, sold, licensed, transferred, copied
-## or reproduced in whole or in part in any manner or form or in or
-## on any media by any person other than in accordance with the terms
-## of the Licence Agreement supplied with the software, or otherwise
-## without the prior written consent of the copyright owners.
-##
-## This software is distributed WITHOUT ANY WARRANTY, without even the
-## implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-## PURPOSE, except where stated in the Licence Agreement supplied with
-## the software.
-##
-##      Created By :            Nikolay Stanchev
-##      Created Date :          04-06-2018
-##      Created for Project :   FLAME
-"""
-
-
-from clmcservice.aggregationapi.utilities import generate_e2e_delay_report
-
-"""
-A python module which provides auxiliary functions to mimic the behaviour of an InfluxDBClient when unit testing the aggregator.
-"""
-
-
-class MockResultSet(object):
-    """
-    A mock object used to mimic the behaviour of a ResultSet in the influx library (we only need the functionality of an object that collects
-    a group of points and has an items() method to get the collected points.
-    """
-
-    def __init__(self, points):
-        """
-        Initialise the mock result set.
-
-        :param points: the collected points
-        """
-
-        self.points = points
-
-    def items(self):
-        """
-        Get the data points in the result set.
-
-        :return: the collected data points
-        """
-
-        return self.points
-
-
-# The following are network-related auxiliary functions to generate test data.
-
-def _network_result_point(net_latency, net_bandwidth):
-    """
-    Returns a generator, which yields one data point representing a network measurement (fields only)
-
-    :param net_latency: the reported network latency
-    :param net_bandwidth: the reported network bandwidth.
-
-    :return: a generator object with one element (same behaviour is used in the influxdb library even when only one point is returned from the query)
-    """
-
-    yield {"net_latency": net_latency, "net_bandwidth": net_bandwidth}
-
-
-def _network_tags(path, source, target):
-    """
-    Returns a dictionary representing a network measurement (tags only)
-
-    :param path: the path identifier
-    :param source: the source service router
-    :param target: the target service router
-
-    :return: a dictionary with those values
-    """
-
-    return {"path": path, "source": source, "target": target}
-
-
-def _network_metadata(measurement, path, source, target):
-    """
-    Returns an influxdb-styled metadata about a network measurement.
-
-    :param measurement: the measurement table name
-    :param path: the path identifier
-    :param source: the source service router
-    :param target: the target service router
-
-    :return: a tuple with the first element being the measurement name and the second element being a dictionary with the network measurement tag values
-    """
-
-    return measurement, _network_tags(path, source, target)
-
-
-def network_result_item(measurement, path, source, target, net_latency, net_bandwidth):
-    """
-    Returns a full influxdb-styled network measurement item - with tag and field values.
-
-    :param measurement: the measurement table name
-    :param path: the path identifier
-    :param source: the source service router
-    :param target: the target service router
-    :param net_latency: the reported network latency
-    :param net_bandwidth: the reported network bandwidth.
-
-    :return: a tuple with the first element being the result metadata (measurement name and tags) and the second element being the data field points
-    """
-
-    return _network_metadata(measurement, path, source, target), _network_result_point(net_latency, net_bandwidth)
-
-
-# The following are service-related auxiliary functions to generate test data.
-
-def _service_result_point(response_time, request_size, response_size):
-    """
-    Returns a generator, which yields one data point representing a service measurement (fields only)
-
-    :param response_time: the response time of the service
-    :param request_size: the averaged request size of the service
-    :param response_size: the averaged response size of the service
-
-    :return: a generator object with one element (same behaviour is used in the influxdb library even when only one point is returned from the query)
-    """
-
-    yield {"response_time": response_time, "request_size": request_size, "response_size": response_size}
-
-
-def _service_tags(sfr, endpoint, sf_instance):
-    """
-    Returns a dictionary representing a service measurement (tags only)
-
-    :param sfr: the service router to which the service's endpoint is connected to
-    :param endpoint: the endpoint the service is being deployed on
-    :param sf_instance: the service function instance (FQDN)
-
-    :return: a dictionary with those values
-    """
-
-    return {"sfr": sfr,  "endpoint": endpoint, "sf_instance": sf_instance}
-
-
-def _service_metadata(measurement, sfr, endpoint, sf_instance):
-    """
-    Returns an influxdb-styled metadata about a service measurement.
-
-    :param measurement: the measurement table name
-    :param sfr: the service router to which the service's endpoint is connected to
-    :param endpoint: the endpoint the service is being deployed on
-    :param sf_instance: the service function instance (FQDN)
-
-    :return: a tuple with the first element being the measurement name and the second element being a dictionary with the service measurement tag values
-    """
-
-    return measurement, _service_tags(sfr, endpoint, sf_instance)
-
-
-def service_result_item(measurement, sfr, endpoint, sf_instance, response_time, request_size, response_size):
-    """
-    Returns a full influxdb-styled service measurement item - with tag and field values.
-
-    :param measurement: the measurement table name
-    :param sfr: the service router to which the service's endpoint is connected to
-    :param endpoint: the endpoint the service is being deployed on
-    :param sf_instance: the service function instance (FQDN)
-    :param response_time: the response time of the service
-    :param request_size: the averaged request size of the service
-    :param response_size: the averaged response size of the service
-
-    :return: a tuple with the first element being the result metadata (measurement name and tags) and the second element being the data field points
-    """
-
-    return _service_metadata(measurement, sfr, endpoint, sf_instance), _service_result_point(response_time, request_size, response_size)
-
-
-# The following are auxiliary functions for generating an e2e row used in the unit testing of the aggregator.
-
-def drop_timestamp(d):
-    """
-    Drops the time stamp from a dictionary-represented influx result item object
-
-    :param d: the dictionary object representing a measurement row from influx
-
-    :return: the same dictionary with no timestamp
-    """
-
-    d.pop('time')
-    return d
-
-
-def _generate_e2e_row(path_id, source_sfr, target_sfr, endpoint, sf_instance, delay_forward, delay_reverse, delay_service, avg_request_size, avg_response_size, avg_bandwidth):
-    """
-    Generates a combined averaged measurement about the e2e delay and its contributing parts with default timestamp (set as 0)
-
-    :param path_id: The path identifier, which is a bidirectional path ID for the request and the response path
-    :param source_sfr: source service router
-    :param target_sfr: target service router
-    :param endpoint: endpoint of the media component
-    :param sf_instance: service function instance (media component)
-    :param delay_forward: Path delay (Forward direction)
-    :param delay_reverse: Path delay (Reverse direction)
-    :param delay_service: the media service component response time
-    :param avg_request_size: averaged request size
-    :param avg_response_size: averaged response size
-    :param avg_bandwidth: averaged bandwidth
-
-    :return: a list of dict-formatted reports to post on influx
-    """
-
-    return generate_e2e_delay_report(path_id, source_sfr, target_sfr, endpoint, sf_instance, delay_forward,
-                                     delay_reverse, delay_service, avg_request_size, avg_response_size, avg_bandwidth, 0)[0]
-
-
-def generate_e2e_no_timestamp_row(path_id, source_sfr, target_sfr, endpoint, sf_instance, delay_forward, delay_reverse,
-                                  delay_service, avg_request_size, avg_response_size, avg_bandwidth):
-    """
-    Generates a combined averaged measurement about the e2e delay and its contributing parts (with no timestamp, used for testing)
-
-    :param path_id: The path identifier, which is a bidirectional path ID for the request and the response path
-    :param source_sfr: source service router
-    :param target_sfr: target service router
-    :param endpoint: endpoint of the media component
-    :param sf_instance: service function instance (media component)
-    :param delay_forward: Path delay (Forward direction)
-    :param delay_reverse: Path delay (Reverse direction)
-    :param delay_service: the media service component response time
-    :param avg_request_size: averaged request size
-    :param avg_response_size: averaged response size
-    :param avg_bandwidth: averaged bandwidth
-
-    :return: a list of dict-formatted reports to post on influx
-    """
-
-    return drop_timestamp(_generate_e2e_row(path_id, source_sfr, target_sfr, endpoint, sf_instance, delay_forward, delay_reverse, delay_service,
-                                             avg_request_size, avg_response_size, avg_bandwidth))
diff --git a/src/service/clmcservice/aggregation/test_aggregator.py b/src/service/clmcservice/aggregation/test_aggregator.py
deleted file mode 100644
index fd5befca9f9619b93415ab2205e42abe3994b2d6..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/aggregation/test_aggregator.py
+++ /dev/null
@@ -1,218 +0,0 @@
-#!/usr/bin/python3
-"""
-## © University of Southampton IT Innovation Centre, 2018
-##
-## Copyright in this software belongs to University of Southampton
-## IT Innovation Centre of Gamma House, Enterprise Road,
-## Chilworth Science Park, Southampton, SO16 7NS, UK.
-##
-## This software may not be used, sold, licensed, transferred, copied
-## or reproduced in whole or in part in any manner or form or in or
-## on any media by any person other than in accordance with the terms
-## of the Licence Agreement supplied with the software, or otherwise
-## without the prior written consent of the copyright owners.
-##
-## This software is distributed WITHOUT ANY WARRANTY, without even the
-## implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-## PURPOSE, except where stated in the Licence Agreement supplied with
-## the software.
-##
-##      Created By :            Nikolay Stanchev
-##      Created Date :          04-06-2018
-##      Created for Project :   FLAME
-"""
-
-
-from threading import Event
-from unittest import mock
-from clmcservice.aggregation.aggregator import AggregatorThread
-from clmcservice.aggregation.influx_data_interface import MockResultSet, network_result_item, service_result_item, drop_timestamp, generate_e2e_no_timestamp_row
-
-
-class TestAggregation(object):
-    """
-    A unit test to ensure the functionality of the aggregator is correct.
-    """
-
-    ACTUAL_RESULTS = "actual_aggregated_results"  # the attribute name of the actual results data structure
-    EXPECTED_RESULTS = "expected_aggregated_results"  # the attribute name of the expected results data structure
-    FINISHED = "finished_event"  # the attribute name of the flag object, which marks the end of the test
-
-    def points_generator(self, network_items, service_items):
-        """
-        A generator method intended to be used by the mock db client when involving the mocked query() method. It takes the network and service items, and generates a result from
-        those items each time query() is called by taking turns - starts with network result, followed by service result and then it repeats, until all items have been exhausted.
-        Network items and service items are expected to have the same length.
-
-        :param network_items: the network data to generate from
-        :param service_items: the service data to generate from
-
-        :return: a generator object
-        """
-
-        assert len(network_items) == len(service_items), "The data points generator must receive the same number of network items as the number of service items"
-        index = 0
-
-        while not getattr(self, self.FINISHED).is_set():
-            items = network_items[index]
-            yield MockResultSet(items)
-
-            items = service_items[index]
-
-            # before yielding the service data points, check if both sets of data points are enumerated
-            if index == len(network_items)-1:
-                # if so, set the finished flag of the test
-                getattr(self, self.FINISHED).set()
-
-            yield MockResultSet(items)
-
-            index += 1
-
-    def setup_mock_db_client(self, mock_class):
-        """
-        Sets up a mock db client and also defines the expected aggregation results from the test.
-
-        :param mock_class: the mock class used as an influx db client instance
-        :return:
-        """
-
-        setattr(self, self.ACTUAL_RESULTS, [])  # initially, there are no actual results, these are built progressively while the aggregator is running
-        setattr(self, self.EXPECTED_RESULTS, [
-            generate_e2e_no_timestamp_row(path_id="SR1-SR3", source_sfr="SR1", target_sfr="SR3", endpoint="endpoint3", sf_instance="ms1.flame.org", delay_forward=10,
-                                          delay_reverse=15, delay_service=10, avg_request_size=1024, avg_response_size=8, avg_bandwidth=104857600),
-            generate_e2e_no_timestamp_row(path_id="SR1-SR3", source_sfr="SR1", target_sfr="SR3", endpoint="endpoint3", sf_instance="ms1.flame.org", delay_forward=5,
-                                          delay_reverse=25, delay_service=40, avg_request_size=16, avg_response_size=2048, avg_bandwidth=104857600),
-            generate_e2e_no_timestamp_row(path_id="SR1-SR2", source_sfr="SR1", target_sfr="SR2", endpoint="endpoint2", sf_instance="ms2.flame.org", delay_forward=15,
-                                          delay_reverse=35, delay_service=60, avg_request_size=32, avg_response_size=1024, avg_bandwidth=104857600),
-            generate_e2e_no_timestamp_row(path_id="SR4-SR5", source_sfr="SR4", target_sfr="SR5", endpoint="endpoint5", sf_instance="ms5.flame.org", delay_forward=11,
-                                          delay_reverse=25, delay_service=50, avg_request_size=2048, avg_response_size=32, avg_bandwidth=104857600),
-            generate_e2e_no_timestamp_row(path_id="SR1-SR2", source_sfr="SR1", target_sfr="SR2", endpoint="endpoint2", sf_instance="ms2.flame.org", delay_forward=12,
-                                          delay_reverse=5, delay_service=60, avg_request_size=32, avg_response_size=1024, avg_bandwidth=104857600),
-            generate_e2e_no_timestamp_row(path_id="SR1-SR3", source_sfr="SR1", target_sfr="SR3", endpoint="endpoint3", sf_instance="ms1.flame.org", delay_forward=16,
-                                          delay_reverse=25, delay_service=40, avg_request_size=16, avg_response_size=2048, avg_bandwidth=104857600),
-            generate_e2e_no_timestamp_row(path_id="SR10-SR12", source_sfr="SR12", target_sfr="SR10", endpoint="endpoint10", sf_instance="ms4.flame.org", delay_forward=22,
-                                          delay_reverse=3, delay_service=75, avg_request_size=1024, avg_response_size=64, avg_bandwidth=104857600),
-            generate_e2e_no_timestamp_row(path_id="SR14-SR15", source_sfr="SR14", target_sfr="SR15", endpoint="endpoint15", sf_instance="ms2.flame.org", delay_forward=24,
-                                          delay_reverse=27, delay_service=105, avg_request_size=1024, avg_response_size=128, avg_bandwidth=104857600),
-            generate_e2e_no_timestamp_row(path_id="SR14-SR15", source_sfr="SR15", target_sfr="SR14", endpoint="endpoint14", sf_instance="ms1.flame.org", delay_forward=27,
-                                          delay_reverse=24, delay_service=85, avg_request_size=32, avg_response_size=64, avg_bandwidth=104857600),
-            generate_e2e_no_timestamp_row(path_id="SR8-SR18", source_sfr="SR18", target_sfr="SR8", endpoint="endpoint8", sf_instance="ms2.flame.org", delay_forward=18,
-                                          delay_reverse=19, delay_service=75, avg_request_size=2048, avg_response_size=16, avg_bandwidth=104857600),
-        ])  # defines the expected rows from the aggregation
-        setattr(self, self.FINISHED, Event())
-
-        # initialises the influx data generator, which is involved each time the query() method of the mock db client is called
-        mock_points = self.points_generator(
-            # network items is a list of tuples, each tuple represents a result from a query; each time query() is called and a network measurement must be generated, then one of
-            # these tuples is generated, empty tuple means result with no points
-            network_items=[
-                (
-                    network_result_item(measurement="network_delays", path="SR1-SR3", source="SR1", target="SR3", net_latency=10, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR1-SR3", source="SR3", target="SR1", net_latency=15, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR1-SR33", source="SR1", target="SR33", net_latency=15, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR2-SR11", source="SR11", target="SR2", net_latency=15, net_bandwidth=104857600),
-                ),
-                (
-                    network_result_item(measurement="network_delays", path="SR1-SR3", source="SR1", target="SR3", net_latency=5, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR1-SR3", source="SR3", target="SR1", net_latency=25, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR1-SR2", source="SR1", target="SR2", net_latency=15, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR1-SR2", source="SR2", target="SR1", net_latency=35, net_bandwidth=104857600),
-                ),
-                (
-                    network_result_item(measurement="network_delays", path="SR4-SR5", source="SR4", target="SR5", net_latency=5, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR4-SR5", source="SR5", target="SR4", net_latency=25, net_bandwidth=104857600),
-                ),
-                (),
-                (
-                    network_result_item(measurement="network_delays", path="SR4-SR5", source="SR4", target="SR5", net_latency=5, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR4-SR5", source="SR5", target="SR4", net_latency=25, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR0-SR1", source="SR0", target="SR1", net_latency=5, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR0-SR1", source="SR1", target="SR0", net_latency=25, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR10-SR12", source="SR10", target="SR12", net_latency=11, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR10-SR12", source="SR12", target="SR10", net_latency=22, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR14-SR15", source="SR14", target="SR15", net_latency=24, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR14-SR15", source="SR15", target="SR14", net_latency=26, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR8-SR18", source="SR18", target="SR8", net_latency=18, net_bandwidth=104857600),
-                ),
-                (
-                    network_result_item(measurement="network_delays", path="SR4-SR5", source="SR4", target="SR5", net_latency=11, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR1-SR2", source="SR1", target="SR2", net_latency=12, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR1-SR2", source="SR2", target="SR1", net_latency=5, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR1-SR3", source="SR1", target="SR3", net_latency=16, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR10-SR12", source="SR10", target="SR12", net_latency=3, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR14-SR15", source="SR15", target="SR14", net_latency=27, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR16-SR17", source="SR16", target="SR17", net_latency=27, net_bandwidth=104857600),
-                    network_result_item(measurement="network_delays", path="SR8-SR18", source="SR8", target="SR18", net_latency=19, net_bandwidth=104857600),
-                )
-            ],
-            # service items is a list of tuples, each tuple represents a result from a query; each time query() is called and a service measurement must be generated, then one of
-            # these tuples is generated, empty tuple means result with no points
-            service_items=[
-                (
-                    service_result_item(measurement="service_delays", sfr="SR3", endpoint="endpoint3", sf_instance="ms1.flame.org", response_time=10, request_size=1024, response_size=8),
-                    service_result_item(measurement="service_delays", sfr="SR33", endpoint="endpoint33", sf_instance="ms2.flame.org", response_time=20, request_size=4096, response_size=8),
-                    service_result_item(measurement="service_delays", sfr="SR11", endpoint="endpoint11", sf_instance="ms3.flame.org", response_time=30, request_size=1024, response_size=8),
-                ),
-                (
-                    service_result_item(measurement="service_delays", sfr="SR3", endpoint="endpoint3", sf_instance="ms1.flame.org", response_time=40, request_size=16, response_size=2048),
-                    service_result_item(measurement="service_delays", sfr="SR2", endpoint="endpoint2", sf_instance="ms2.flame.org", response_time=60, request_size=32, response_size=1024),
-                ),
-                (
-                    service_result_item(measurement="service_delays", sfr="SR6", endpoint="endpoint6", sf_instance="ms1.flame.org", response_time=60, request_size=1024, response_size=8),
-                    service_result_item(measurement="service_delays", sfr="SR7", endpoint="endpoint7", sf_instance="ms1.flame.org", response_time=70, request_size=1024, response_size=8),
-                ),
-                (
-                    service_result_item(measurement="service_delays", sfr="SR6", endpoint="endpoint6", sf_instance="ms1.flame.org", response_time=65, request_size=2048, response_size=16),
-                    service_result_item(measurement="service_delays", sfr="SR8", endpoint="endpoint8", sf_instance="ms2.flame.org", response_time=75, request_size=2048, response_size=16),
-                    service_result_item(measurement="service_delays", sfr="SR9", endpoint="endpoint9", sf_instance="ms3.flame.org", response_time=25, request_size=2048, response_size=16),
-                ),
-                (),
-                (
-                    service_result_item(measurement="service_delays", sfr="SR5", endpoint="endpoint5", sf_instance="ms5.flame.org", response_time=50, request_size=2048, response_size=32),
-                    service_result_item(measurement="service_delays", sfr="SR10", endpoint="endpoint10", sf_instance="ms4.flame.org", response_time=75, request_size=1024, response_size=64),
-                    service_result_item(measurement="service_delays", sfr="SR15", endpoint="endpoint15", sf_instance="ms2.flame.org", response_time=105, request_size=1024, response_size=128),
-                    service_result_item(measurement="service_delays", sfr="SR14", endpoint="endpoint14", sf_instance="ms1.flame.org", response_time=85, request_size=32, response_size=64),
-                    service_result_item(measurement="service_delays", sfr="SR16", endpoint="endpoint16", sf_instance="ms1.flame.org", response_time=85, request_size=32, response_size=64),
-                )
-            ]
-        )
-
-        # implement the query() and write_points() methods of the mock db client
-        mock_class.query = lambda query: next(mock_points)  # query() returns the next element of the mock_points generator
-        mock_class.write_points = lambda points: getattr(self, self.ACTUAL_RESULTS).append(drop_timestamp(points[0]))  # write_points() adds aggregated rows to actual results list
-
-        # in the end of the test, we can compare the expected results with the actual results that were generated during the aggregation process
-
-    @mock.patch('clmcservice.aggregation.aggregator.InfluxDBClient', autospec=True)
-    def test_aggregator(self, MockDBClient):
-        """
-        The actual test that's executed when running pytest.
-
-        :param MockDBClient: a mock object argument passed by the mock.patch decorator. The decorator changes all occurrences of InfluxDBClient in the aggregator's code to the
-        return value of this MockDBClient object
-        """
-
-        # set up the mock db client by providing implementations for the necessary methods (query and write_points)
-        self.setup_mock_db_client(MockDBClient.return_value)
-
-        # start the aggregator as a thread, report period set to 2 so that the unit tests is not taking too long
-        t = AggregatorThread(report_period=2)
-        t.start()
-
-        # wait until the finished flag has been set
-        getattr(self, self.FINISHED).wait()
-
-        # stop the thread when the aggregation has finished
-        t.stop()
-
-        # compare the expected results with teh actual results that were collected during the aggregation process
-        expected_results = getattr(self, self.EXPECTED_RESULTS)
-        actual_results = getattr(self, self.ACTUAL_RESULTS)
-        assert type(actual_results) is list
-        assert type(expected_results) is list
-        assert len(actual_results) == len(expected_results), "Actual and expected result differ in length."
-
-        # we compare sorted versions of the expected and actual results; this is because the aggregator implementation uses dictionary for efficiency purposes, hence the order of
-        # the collected results may vary, especially on different OS; hence, we only care about the two list of results to contain the same elements
-        assert sorted(actual_results, key=lambda k: k['tags']['path_ID']) == sorted(expected_results, key=lambda k: k['tags']['path_ID']), \
-            "Test failure - aggregation process returns incorrect results."
diff --git a/src/service/clmcservice/aggregationapi/__init__.py b/src/service/clmcservice/aggregationapi/__init__.py
deleted file mode 100644
index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/aggregationapi/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/src/service/clmcservice/aggregationapi/tests.py b/src/service/clmcservice/aggregationapi/tests.py
deleted file mode 100644
index 086f3d378bc500f590ee386c6a50b246b04dac37..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/aggregationapi/tests.py
+++ /dev/null
@@ -1,548 +0,0 @@
-#!/usr/bin/python3
-"""
-// © University of Southampton IT Innovation Centre, 2018
-//
-// Copyright in this software belongs to University of Southampton
-// IT Innovation Centre of Gamma House, Enterprise Road,
-// Chilworth Science Park, Southampton, SO16 7NS, UK.
-//
-// This software may not be used, sold, licensed, transferred, copied
-// or reproduced in whole or in part in any manner or form or in or
-// on any media by any person other than in accordance with the terms
-// of the Licence Agreement supplied with the software, or otherwise
-// without the prior written consent of the copyright owners.
-//
-// This software is distributed WITHOUT ANY WARRANTY, without even the
-// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-// PURPOSE, except where stated in the Licence Agreement supplied with
-// the software.
-//
-//      Created By :            Nikolay Stanchev
-//      Created Date :          15-05-2018
-//      Created for Project :   FLAME
-"""
-
-from pyramid import testing
-from pyramid.httpexceptions import HTTPBadRequest
-from time import sleep
-from clmcservice.aggregationapi.utilities import CONF_FILE_ATTRIBUTE, CONF_OBJECT, AGGREGATOR_CONFIG_SECTION, CONFIG_ATTRIBUTES, PROCESS_ATTRIBUTE, RUNNING_FLAG, MALFORMED_FLAG, URL_REGEX
-import pytest
-import os
-import signal
-import configparser
-
-
-class TestAggregatorAPI(object):
-    """
-    A pytest-implementation test for the aggregator API calls
-    """
-
-    @pytest.fixture(autouse=True)
-    def app_config(self):
-        """
-        A fixture to implement setUp/tearDown functionality for all tests by initializing configuration structure for the web service
-        """
-
-        self.registry = testing.setUp()
-        config = configparser.ConfigParser()
-        config[AGGREGATOR_CONFIG_SECTION] = {'aggregator_report_period': 5, 'aggregator_database_name': 'CLMCMetrics', 'aggregator_database_url': "http://172.40.231.51:8086"}
-        self.registry.add_settings({'configuration_object': config, 'aggregator_running': False, 'malformed': False, 'configuration_file_path': "/etc/flame/clmc/service.conf"})
-
-        yield
-
-        testing.tearDown()
-
-    def test_GET_config(self):
-        """
-        Tests the GET method for the configuration of the aggregator.
-        """
-
-        from clmcservice.aggregationapi.views import AggregatorConfig  # nested import so that importing the class view is part of the test itself
-
-        setup_config = self.registry.get_settings()[CONF_OBJECT]
-
-        # test an error is thrown when aggregator is in unconfigured state
-        self.registry.get_settings()[CONF_OBJECT] = None
-        request = testing.DummyRequest()
-        error_raised = False
-        try:
-            AggregatorConfig(request).get()
-        except HTTPBadRequest:
-            error_raised = True
-        assert error_raised, "Error must be raised in case of a not configured aggregator."
-        self.registry.get_settings()[CONF_OBJECT] = setup_config
-
-        # test GET method when aggregator is configured
-        assert int(self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_report_period')) == 5, "Initial report period is 5 seconds."
-        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_name') == 'CLMCMetrics', "Initial database name the aggregator uses is CLMCMetrics."
-        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_url') == "http://172.40.231.51:8086", "Initial aggregator url is http://172.40.231.51:8086"
-
-        request = testing.DummyRequest()
-        response = AggregatorConfig(request).get()
-
-        assert response == {'aggregator_report_period': 5,
-                            'aggregator_database_name': 'CLMCMetrics',
-                            'aggregator_database_url': "http://172.40.231.51:8086"}, "Response must be a dictionary representing a JSON object with the correct configuration data of the aggregator."
-
-        assert int(self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_report_period')) == 5, "A GET request must not modify the aggregator configuration data."
-        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_name') == 'CLMCMetrics', "A GET request must not modify the aggregator configuration data."
-        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_url') == "http://172.40.231.51:8086", "A GET request must not modify the aggregator configuration data."
-
-    @pytest.mark.parametrize("input_body, output_value", [
-        ('{"aggregator_report_period": 10, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://171.40.231.51:8086"}',
-         {'aggregator_report_period': 10, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "http://171.40.231.51:8086"}),
-        ('{"aggregator_report_period": 15, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://172.50.231.51:8086"}',
-         {'aggregator_report_period': 15, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "http://172.50.231.51:8086"}),
-        ('{"aggregator_report_period": 20, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://172.60.231.51:8086"}',
-         {'aggregator_report_period': 20, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "http://172.60.231.51:8086"}),
-        ('{"aggregator_report_period": 25, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://172.60.231.51:8086"}',
-         {'aggregator_report_period': 25, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "http://172.60.231.51:8086"}),
-        ('{"aggregator_report_period": 200, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "https://172.50.231.51:8086"}',
-         {'aggregator_report_period': 200, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "https://172.50.231.51:8086"}),
-        ('{"aggregator_report_period": 150, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "https://localhost:8086"}',
-         {'aggregator_report_period': 150, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "https://localhost:8086"}),
-        ("{aggregator_report_period: 2hb5, aggregator_database_name: CLMCMetrics, aggregator_database_url: http://172.60.231.51:8086}", None),
-        ("{aggregator_report_period: 250-, aggregator_database_name: CLMCMetrics, aggregator_database_url: http://172.60.231.52:8086}", None),
-        ("{aggregator_report_period: 25, aggregator_database_name: CLMCMetrics, aggregator_database_url: ftp://172.60.231.51:8086}", None),
-        ("{aggregator_report_period: 25, aggregator_database_name: CLMCMetrics, aggregator_database_url: http://172.60.231.51:8086/query param}", None),
-        ("{aggregator_report_period: 250, aggregator_database_name: CLMCMetrics, aggregator_database_url: http://172.60.231.52:808686}", None),
-        ("{}", None),
-        ("{aggregator_running: true}", None),
-    ])
-    def test_PUT_config(self, input_body, output_value):
-        """
-        Tests the PUT method for the configuration of the aggregator
-        :param input_body: the input body parameter
-        :param output_value: the expected output value, None for expecting an Exception
-        """
-
-        from clmcservice.aggregationapi.views import AggregatorConfig, AggregatorController  # nested import so that importing the class view is part of the test itself
-
-        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
-        assert int(self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_report_period')) == 5, "Initial report period is 5 seconds."
-        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_name') == 'CLMCMetrics', "Initial database name the aggregator uses is CLMCMetrics."
-        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_url') == "http://172.40.231.51:8086", "Initial aggregator url is http://172.40.231.51:8086"
-
-        request = testing.DummyRequest()
-        request.body = input_body.encode(request.charset)
-
-        if output_value is not None:
-            response = AggregatorConfig(request).put()
-            assert response == output_value, "Response of PUT request must include the new configuration of the aggregator"
-
-            for attribute in CONFIG_ATTRIBUTES:
-                assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION][attribute] == str(output_value[attribute]), "Aggregator settings configuration is not updated."
-
-            assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Aggregator running status should not be updated after a configuration update."
-
-            # assert that the conf file is updated
-            updated_conf = configparser.ConfigParser()
-            conf_file = self.registry.get_settings().get(CONF_FILE_ATTRIBUTE)
-            assert updated_conf.read(conf_file) == [conf_file]
-            assert AGGREGATOR_CONFIG_SECTION in updated_conf.sections()
-
-            for attribute in CONFIG_ATTRIBUTES:
-                assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION][attribute] == updated_conf[AGGREGATOR_CONFIG_SECTION][attribute], "Aggregator settings configuration is not updated."
-
-        else:
-            error_raised = False
-            try:
-                AggregatorConfig(request).put()
-            except HTTPBadRequest:
-                error_raised = True
-
-            assert error_raised, "Error must be raised in case of an invalid argument."
-
-    def test_start(self):
-        """
-        Tests starting the aggregator through an API call.
-        """
-
-        from clmcservice.aggregationapi.views import AggregatorController  # nested import so that importing the class view is part of the test itself
-
-        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
-
-        # test starting the aggregator when in unconfigured state
-        setup_config = self.registry.get_settings()[CONF_OBJECT]
-        self.registry.get_settings()[CONF_OBJECT] = None
-        request = testing.DummyRequest()
-        input_body = '{"action": "start"}'
-        request.body = input_body.encode(request.charset)
-        error_raised = False
-        try:
-            AggregatorController(request).put()
-        except HTTPBadRequest:
-            error_raised = True
-        assert error_raised, "Error must be raised in case of a not configured aggregator."
-        self.registry.get_settings()[CONF_OBJECT] = setup_config
-
-        # test starting the aggregation when in configured state
-        request = testing.DummyRequest()
-        input_body = '{"action": "start"}'
-        request.body = input_body.encode(request.charset)
-
-        response = AggregatorController(request).put()
-        assert response == {RUNNING_FLAG: True}, "The aggregator should have been started."
-        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been started."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is not None, "Aggregator process should have been initialized."
-
-        # kill the started process after the test is over
-        pid = request.registry.settings[PROCESS_ATTRIBUTE].pid
-        os.kill(pid, signal.SIGTERM)
-
-    def test_stop(self):
-        """
-        Tests stopping the aggregator through an API call.
-        """
-
-        from clmcservice.aggregationapi.views import AggregatorController  # nested import so that importing the class view is part of the test itself
-
-        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
-
-        # test stopping the aggregator when in unconfigured state
-        setup_config = self.registry.get_settings()[CONF_OBJECT]
-        self.registry.get_settings()[CONF_OBJECT] = None
-        request = testing.DummyRequest()
-        input_body = '{"action": "stop"}'
-        request.body = input_body.encode(request.charset)
-        error_raised = False
-        try:
-            AggregatorController(request).put()
-        except HTTPBadRequest:
-            error_raised = True
-        assert error_raised, "Error must be raised in case of a not configured aggregator."
-        self.registry.get_settings()[CONF_OBJECT] = setup_config
-
-        # test stopping the aggregation when in configured state
-        # send a start request to trigger the aggregator
-        request = testing.DummyRequest()
-        input_body = '{"action": "start"}'
-        request.body = input_body.encode(request.charset)
-        AggregatorController(request).put()
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is not None, "Aggregator process should have been initialized."
-        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Aggregator process should have been initialized."
-
-        # test stopping the aggregator process when it is running
-        request = testing.DummyRequest()
-        input_body = '{"action": "stop"}'
-        request.body = input_body.encode(request.charset)
-
-        response = AggregatorController(request).put()
-        assert response == {RUNNING_FLAG: False}, "The aggregator should have been stopped."
-        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been stopped."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Aggregator process should have been terminated."
-
-        sleep(2)  # put a 2 seconds timeout so that the aggregator process can terminate
-
-        # test stopping the aggregator process when it is not running
-        request = testing.DummyRequest()
-        input_body = '{"action": "stop"}'
-        request.body = input_body.encode(request.charset)
-
-        response = AggregatorController(request).put()
-        assert response == {RUNNING_FLAG: False}, "The aggregator should have been stopped."
-        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been stopped."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Aggregator process should have been terminated."
-
-    def test_restart(self):
-        """
-        Tests restarting the aggregator through an API call.
-        """
-
-        from clmcservice.aggregationapi.views import AggregatorController  # nested import so that importing the class view is part of the test itself
-
-        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
-
-        # test restarting the aggregator when in unconfigured state
-        setup_config = self.registry.get_settings()[CONF_OBJECT]
-        self.registry.get_settings()[CONF_OBJECT] = None
-        request = testing.DummyRequest()
-        input_body = '{"action": "restart"}'
-        request.body = input_body.encode(request.charset)
-        error_raised = False
-        try:
-            AggregatorController(request).put()
-        except HTTPBadRequest:
-            error_raised = True
-        assert error_raised, "Error must be raised in case of a not configured aggregator."
-        self.registry.get_settings()[CONF_OBJECT] = setup_config
-
-        # test restarting the aggregation when in configured state
-        # test restarting the aggregator process when it is stopped
-        request = testing.DummyRequest()
-        input_body = '{"action": "restart"}'
-        request.body = input_body.encode(request.charset)
-
-        response = AggregatorController(request).put()
-        assert response == {RUNNING_FLAG: True}, "The aggregator should have been restarted."
-        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been restarted."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE), "The aggregator process should have been reinitialised."
-
-        # test restarting the aggregator process when it is running
-        request = testing.DummyRequest()
-        input_body = '{"action": "restart"}'
-        request.body = input_body.encode(request.charset)
-
-        response = AggregatorController(request).put()
-        assert response == {RUNNING_FLAG: True}, "The aggregator should have been restarted."
-        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been restarted."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE), "The aggregator process should have been reinitialised."
-
-        # kill the started process after the test is over
-        pid = request.registry.settings[PROCESS_ATTRIBUTE].pid
-        os.kill(pid, signal.SIGTERM)
-
-    @pytest.mark.parametrize("input_body", [
-        '{"action": "malformed"}',
-        '{"action": true}',
-        '{"action": false}',
-        '{"action": 1}',
-        '{invalid-json}',
-        '{"action": "start", "unneeded_argument": false}',
-        '{}'
-    ])
-    def test_malformed_actions(self, input_body):
-        """
-        Tests sending a malformed type of action to the aggregator through an API call.
-        """
-
-        from clmcservice.aggregationapi.views import AggregatorController  # nested import so that importing the class view is part of the test itself
-
-        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
-
-        # test restarting the aggregator process when it is running
-        request = testing.DummyRequest()
-        input_body = input_body
-        request.body = input_body.encode(request.charset)
-
-        error_raised = False
-        try:
-            AggregatorController(request).put()
-        except HTTPBadRequest:
-            error_raised = True
-
-        assert error_raised
-
-    def test_GET_status(self):
-        """
-        Tests the GET method for the status of the aggregator.
-        """
-
-        from clmcservice.aggregationapi.views import AggregatorController  # nested import so that importing the class view is part of the test itself
-
-        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
-
-        request = testing.DummyRequest()
-        response = AggregatorController(request).get()
-
-        assert response == {'aggregator_running': False}, "Response must be a dictionary representing a JSON object with the correct status data of the aggregator."
-
-        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "A GET request must not modify the aggregator status flag."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "A GET request must not start the aggregator process."
-
-        # test status with malformed configuration
-        # start the aggregator
-        request = testing.DummyRequest()
-        input_body = '{"action": "start"}'
-        request.body = input_body.encode(request.charset)
-        AggregatorController(request).put()
-        self.registry.get_settings()[MALFORMED_FLAG] = True
-
-        request = testing.DummyRequest()
-        response = AggregatorController(request).get()
-
-        assert response == {'aggregator_running': True,
-                            'malformed': True,
-                            'comment': 'Aggregator is running in a malformed state - it uses an old version of the configuration. Please, restart it so that the updated configuration is used.'}, \
-            "Response must be a dictionary representing a JSON object with the correct status data of the aggregator."
-
-        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "A GET request must not modify the aggregator status flag."
-        assert self.registry.get_settings().get(MALFORMED_FLAG), "A GET request must not modify the aggregator malformed flag."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is not None, "A GET request must not stop the aggregator process."
-
-        # kill the started process after the test is over
-        pid = request.registry.settings[PROCESS_ATTRIBUTE].pid
-        os.kill(pid, signal.SIGTERM)
-
-    def test_malformed_flag_behaviour(self):
-        """
-        Tests the behaviour of the malformed configuration flag of the aggregator when doing a sequence of API calls.
-        """
-
-        from clmcservice.aggregationapi.views import AggregatorController, AggregatorConfig  # nested import so that importing the class view is part of the test itself
-
-        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
-        assert not self.registry.get_settings().get(MALFORMED_FLAG), "Initially aggregator is not in a malformed state"
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
-        assert int(self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_report_period')) == 5, "Initial report period is 5 seconds."
-        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_name') == 'CLMCMetrics', "Initial database name the aggregator uses is CLMCMetrics."
-        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_url') == "http://172.40.231.51:8086", "Initial aggregator url is http://172.40.231.51:8086"
-
-        # start the aggregator with the default configuration
-        request = testing.DummyRequest()
-        input_body = '{"action": "start"}'
-        request.body = input_body.encode(request.charset)
-
-        response = AggregatorController(request).put()
-        assert response == {RUNNING_FLAG: True}, "The aggregator should have been started."
-
-        # update the configuration of the aggregator while it is running
-        config_body = '{"aggregator_report_period": 15, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://172.50.231.51:8086"}'
-        output_body = {'aggregator_report_period': 15, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "http://172.50.231.51:8086", 'malformed': True,
-                       'comment': 'Aggregator is running in a malformed state - it uses an old version of the configuration. Please, restart it so that the updated configuration is used.'}
-        request = testing.DummyRequest()
-        request.body = config_body.encode(request.charset)
-        response = AggregatorConfig(request).put()
-        assert response == output_body, "Response of PUT request must include the new configuration of the aggregator"
-
-        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator shouldn't be stopped when the configuration is updated."
-        assert self.registry.get_settings().get(MALFORMED_FLAG), "The malformed flag should be set when the configuration is updated while the process is running."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is not None, "The aggregator shouldn't be stopped when the configuration is updated."
-
-        # check that the malformed flag has been updated through a GET call
-        request = testing.DummyRequest()
-        response = AggregatorController(request).get()
-        assert response == {'aggregator_running': True,
-                            'malformed': True,
-                            'comment': 'Aggregator is running in a malformed state - it uses an old version of the configuration. Please, restart it so that the updated configuration is used.'}, \
-            "Response must be a dictionary representing a JSON object with the correct status data of the aggregator."
-
-        # restart the aggregator with the new configuration
-        request = testing.DummyRequest()
-        input_body = '{"action": "restart"}'
-        request.body = input_body.encode(request.charset)
-        response = AggregatorController(request).put()
-        assert response == {RUNNING_FLAG: True}, "The aggregator should have been restarted."
-        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been restarted."
-        assert not self.registry.get_settings().get(MALFORMED_FLAG), "The malformed flag should have been reset to False."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is not None, "The aggregator should have been restarted."
-
-        # update the configuration again while the aggregator is running
-        config_body = '{"aggregator_report_period": 30, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://172.50.231.51:8086"}'
-        output_body = {'aggregator_report_period': 30, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "http://172.50.231.51:8086", 'malformed': True,
-                       'comment': 'Aggregator is running in a malformed state - it uses an old version of the configuration. Please, restart it so that the updated configuration is used.'}
-        request = testing.DummyRequest()
-        request.body = config_body.encode(request.charset)
-        response = AggregatorConfig(request).put()
-        assert response == output_body, "Response of PUT request must include the new configuration of the aggregator"
-
-        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator shouldn't be stopped when the configuration is updated."
-        assert self.registry.get_settings().get(MALFORMED_FLAG), "The malformed flag should be set when the configuration is updated while the process is running."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is not None, "The aggregator shouldn't be stopped when the configuration is updated."
-
-        # stop the aggregator - this should also reset the malformed status flag
-        request = testing.DummyRequest()
-        input_body = '{"action": "stop"}'
-        request.body = input_body.encode(request.charset)
-        response = AggregatorController(request).put()
-        assert response == {RUNNING_FLAG: False}, "The aggregator should have been stopped."
-        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been stopped."
-        assert not self.registry.get_settings().get(MALFORMED_FLAG), "The malformed flag should have been reset to False."
-        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "The aggregator should have been stopped."
-
-    def test_unconfigured_state(self):
-        """
-        Tests the behaviour of the service when in unconfigured state.
-        """
-
-        from clmcservice.aggregationapi.views import AggregatorConfig, AggregatorController
-
-        self.registry.get_settings()[CONF_OBJECT] = None  # unconfigured state - conf object is None
-
-        # when doing a GET for the configuration we expect a bad request if the service is in unconfigured state
-        bad_request = False
-        bad_request_msg = None
-        try:
-            request = testing.DummyRequest()
-            AggregatorConfig(request).get()
-        except HTTPBadRequest as err:
-            bad_request = True
-            bad_request_msg = err.message
-
-        assert bad_request
-        assert bad_request_msg == "Aggregator has not been configured, yet. Send a PUT request to /aggregator/config with a JSON body of the configuration."
-
-        # when doing a PUT for the aggregator to start/stop/restart we expect a bad request if the service is in unconfigured state
-        for action in ('start', 'stop', 'restart'):
-            bad_request = False
-            bad_request_msg = None
-            try:
-                request = testing.DummyRequest()
-                request.body = ('{"action": "' + action + '"}').encode(request.charset)
-                AggregatorController(request).put()
-            except HTTPBadRequest as err:
-                bad_request = True
-                bad_request_msg = err.message
-
-            assert bad_request
-            assert bad_request_msg == "You must configure the aggregator before controlling it. Send a PUT request to /aggregator/config with a JSON body of the configuration."
-
-        # configure the aggregator
-        input_body = '{"aggregator_report_period": 10, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://171.40.231.51:8086"}'
-        output_body = {'aggregator_report_period': 10, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "http://171.40.231.51:8086"}
-        request = testing.DummyRequest()
-        request.body = input_body.encode(request.charset)
-        response = AggregatorConfig(request).put()
-        assert response == output_body
-
-        request = testing.DummyRequest()
-        assert AggregatorConfig(request).get() == output_body
-
-
-class TestRegexURL(object):
-    """
-    A pytest-implementation test for the regular expression the service uses to validate the database URL
-    """
-
-    @pytest.mark.parametrize("valid_url", [
-        "http://localhost:8080/",
-        "https://localhost:80/url/path",
-        "https://192.168.20.20/?query=param",
-        "http://custom.domain.com",
-        "http://domain.net:8888/",
-        "https://10.160.150.4:21",
-        "http://localhost:12345",
-        "http://domain.com:21/path",
-        "http://domain.com:32?path",
-        "http://domain.com:43#path"
-    ])
-    def test_valid_urls(self, valid_url):
-        """
-        Tests that the regular expression can detect valid URLs.
-
-        :param valid_url: a string representing a valid URL
-        """
-
-        matched_object = URL_REGEX.match(valid_url)
-
-        assert matched_object is not None, "The regular expression fails in validating a correct URL."
-
-        assert matched_object.group() is not None, "The matched object should return the full-match string"
-
-    @pytest.mark.parametrize("invalid_url", [
-        "ftp://localhost:80/url/path",
-        "tcp://192.168.20.20/?query=param",
-        "http:/localhost:80/",
-        "https//localhost:8080/",
-        "https://domain:1234/url/path",
-        "http://domain.com:808080/",
-        "http://localhost:8-080/",
-        "http://localhost:port80/",
-        "http://domain.com:8080url/path",
-        "http://domain.com:8080/?url path",
-    ])
-    def test_invalid_urls(self, invalid_url):
-        """
-        Tests that the regular expression can detect invalid URLs.
-
-        :param invalid_url: a string representing an invalid URL
-        """
-
-        matched_object = URL_REGEX.match(invalid_url)
-
-        assert matched_object is None, "The regular expression fails in detecting an invalid URL."
diff --git a/src/service/clmcservice/aggregationapi/utilities.py b/src/service/clmcservice/aggregationapi/utilities.py
deleted file mode 100644
index 2375300730d47d2f6927961674f8064bc91bfe3a..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/aggregationapi/utilities.py
+++ /dev/null
@@ -1,180 +0,0 @@
-#!/usr/bin/python3
-"""
-// © University of Southampton IT Innovation Centre, 2018
-//
-// Copyright in this software belongs to University of Southampton
-// IT Innovation Centre of Gamma House, Enterprise Road,
-// Chilworth Science Park, Southampton, SO16 7NS, UK.
-//
-// This software may not be used, sold, licensed, transferred, copied
-// or reproduced in whole or in part in any manner or form or in or
-// on any media by any person other than in accordance with the terms
-// of the Licence Agreement supplied with the software, or otherwise
-// without the prior written consent of the copyright owners.
-//
-// This software is distributed WITHOUT ANY WARRANTY, without even the
-// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-// PURPOSE, except where stated in the Licence Agreement supplied with
-// the software.
-//
-//      Created By :            Nikolay Stanchev
-//      Created Date :          15-05-2018
-//      Created for Project :   FLAME
-"""
-
-from json import loads
-from re import compile, IGNORECASE
-from configparser import ConfigParser
-
-CONF_FILE_ATTRIBUTE = 'configuration_file_path'  # the attribute pointing to the configuration file path
-CONF_OBJECT = 'configuration_object'  # the attribute, which stores the service configuration object
-
-AGGREGATOR_CONFIG_SECTION = "AGGREGATOR"  # the section in the configuration holding all the configuration attributes declared below
-CONFIG_ATTRIBUTES = ('aggregator_report_period', 'aggregator_database_name', 'aggregator_database_url')  # all of the configuration attributes - to be used as dictionary keys
-
-RUNNING_FLAG = 'aggregator_running'  # Attribute for storing the flag, which shows whether the aggregator is running or not - to be used as a dictionary key
-
-PROCESS_ATTRIBUTE = 'aggregator_process'  # Attribute for storing the process object of the aggregator - to be used as a dictionary key
-
-# a 'malformed' running state of the aggregator is when the configuration is updated, but the aggregator is not restarted so it is running with an old version of the conf.
-MALFORMED_FLAG = 'malformed'  # Attribute for storing the flag, which shows whether the aggregator is running in an malformed state or not - to be used as a dictionary key
-
-# used to indicate a malformed configuration message
-COMMENT_ATTRIBUTE = 'comment'
-COMMENT_VALUE = 'Aggregator is running in a malformed state - it uses an old version of the configuration. Please, restart it so that the updated configuration is used.'
-
-URL_REGEX = compile(
-    r'^https?://'  # http:// or https://
-    r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain, e.g. example.domain.com
-    r'localhost|'  # or localhost...
-    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # or IP address (IPv4 format)
-    r'(?::\d{2,5})?'  # optional port number
-    r'(?:[/?#][^\s]*)?$',  # URL path or query parameters
-    IGNORECASE)
-
-
-def validate_config_content(configuration):
-    """
-    A utility function to validate a configuration string representing a JSON dictionary.
-
-    :param configuration: the configuration string to validate
-    :return the validated configuration dictionary object with the values converted to their required type
-    :raise AssertionError: if the argument is not a valid configuration
-    """
-
-    global CONFIG_ATTRIBUTES
-
-    try:
-        configuration = loads(configuration)
-    except:
-        raise AssertionError("Configuration must be a JSON object.")
-
-    assert len(configuration) == len(CONFIG_ATTRIBUTES), "Configuration mustn't contain a different number of attributes than the number of required ones."
-
-    for attribute in CONFIG_ATTRIBUTES:
-        assert attribute in configuration, "Required attribute not found in the request content."
-
-    assert type(configuration.get('aggregator_report_period')) == int, "Report period must be an integer, received {0} instead.".format(configuration.get('aggregator_report_period'))
-
-    assert configuration.get('aggregator_report_period') > 0, "Report period must be a positive integer, received {0} instead.".format(configuration.get('aggregator_report_period'))
-
-    assert URL_REGEX.match(configuration.get('aggregator_database_url')) is not None, "The aggregator must have a valid database URL in its configuration, received {0} instead.".format(configuration.get('aggregator_database_url'))
-
-    return configuration
-
-
-def validate_action_content(content):
-    """
-    A utility function to validate a content string representing a JSON dictionary.
-
-    :param content: the content string to validate
-    :return: the validated content dictionary
-    :raise AssertionError: if the argument is not a valid json content
-    """
-
-    try:
-        content = loads(content)
-    except:
-        raise AssertionError("Content must be a JSON object.")
-
-    assert len(content) == 1, "Content mustn't contain more attributes than the required one."
-
-    assert content['action'] in ('start', 'stop', 'restart')
-
-    return content
-
-
-def validate_conf_file(conf_file_path):
-    """
-    Validates the aggregator's configuration file - checks for existence of the file path, whether it can be parsed as a configuration file and
-    whether it contains the required configuration attributes.
-
-    :param conf_file_path: the configuration file path to check
-
-    :return: the parsed configuration if valid, None otherwise
-    """
-
-    global AGGREGATOR_CONFIG_SECTION, CONFIG_ATTRIBUTES
-
-    conf = ConfigParser()
-    result = conf.read(conf_file_path)
-
-    # if result doesn't contain one element, namely the conf_file_path,
-    # then the configuration file cannot be parsed for some reason (doesn't exist, cannot be opened, invalid, etc.)
-    if len(result) == 0:
-        return None
-
-    if AGGREGATOR_CONFIG_SECTION not in conf.sections():
-        return None  # the config should include a section called AGGREGATOR
-
-    for key in CONFIG_ATTRIBUTES:
-        if key not in conf[AGGREGATOR_CONFIG_SECTION]:
-            return None  # the configuration must include each configuration attribute
-
-    try:
-        int(conf[AGGREGATOR_CONFIG_SECTION]['aggregator_report_period'])
-    except ValueError:
-        return None  # the configuration must contain a valid integer for the aggregator's report period
-
-    return conf
-
-
-def generate_e2e_delay_report(path_id, source_sfr, target_sfr, endpoint, sf_instance, delay_forward, delay_reverse, delay_service, avg_request_size, avg_response_size, avg_bandwidth, time):
-    """
-    Generates a combined averaged measurement about the e2e delay and its contributing parts
-
-    :param path_id: The path identifier, which is a bidirectional path ID for the request and the response path
-    :param source_sfr: source service router
-    :param target_sfr: target service router
-    :param endpoint: endpoint of the media component
-    :param sf_instance: service function instance (media component)
-    :param delay_forward: Path delay (Forward direction)
-    :param delay_reverse: Path delay (Reverse direction)
-    :param delay_service: the media service component response time
-    :param avg_request_size: averaged request size
-    :param avg_response_size: averaged response size
-    :param avg_bandwidth: averaged bandwidth
-    :param time: measurement timestamp
-    :return: a list of dict-formatted reports to post on influx
-    """
-
-    result = [{"measurement": "e2e_delays",
-               "tags": {
-                   "path_ID": path_id,
-                   "source_SFR": source_sfr,
-                   "target_SFR": target_sfr,
-                   "endpoint": endpoint,
-                   "sf_instance": sf_instance
-               },
-               "fields": {
-                   "delay_forward": float(delay_forward),
-                   "delay_reverse": float(delay_reverse),
-                   "delay_service": float(delay_service),
-                   "avg_request_size": float(avg_request_size),
-                   "avg_response_size": float(avg_response_size),
-                   "avg_bandwidth": float(avg_bandwidth)
-               },
-               "time": int(1000000000*time)
-               }]
-
-    return result
diff --git a/src/service/clmcservice/aggregationapi/views.py b/src/service/clmcservice/aggregationapi/views.py
deleted file mode 100644
index 154e3511de170ff0de2acbd885c9d2bcc8a338c7..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/aggregationapi/views.py
+++ /dev/null
@@ -1,252 +0,0 @@
-#!/usr/bin/python3
-"""
-// © University of Southampton IT Innovation Centre, 2018
-//
-// Copyright in this software belongs to University of Southampton
-// IT Innovation Centre of Gamma House, Enterprise Road,
-// Chilworth Science Park, Southampton, SO16 7NS, UK.
-//
-// This software may not be used, sold, licensed, transferred, copied
-// or reproduced in whole or in part in any manner or form or in or
-// on any media by any person other than in accordance with the terms
-// of the Licence Agreement supplied with the software, or otherwise
-// without the prior written consent of the copyright owners.
-//
-// This software is distributed WITHOUT ANY WARRANTY, without even the
-// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-// PURPOSE, except where stated in the Licence Agreement supplied with
-// the software.
-//
-//      Created By :            Nikolay Stanchev
-//      Created Date :          15-05-2018
-//      Created for Project :   FLAME
-"""
-
-from pyramid.view import view_defaults, view_config
-from pyramid.httpexceptions import HTTPBadRequest
-from subprocess import Popen
-from clmcservice.aggregationapi.utilities import validate_config_content, validate_action_content, \
-    CONF_OBJECT, CONF_FILE_ATTRIBUTE, AGGREGATOR_CONFIG_SECTION, CONFIG_ATTRIBUTES, RUNNING_FLAG, PROCESS_ATTRIBUTE, MALFORMED_FLAG, COMMENT_ATTRIBUTE, COMMENT_VALUE
-import os
-import os.path
-import sys
-import logging
-import configparser
-
-
-log = logging.getLogger('service_logger')
-
-
-@view_defaults(route_name='aggregator_config', renderer='json')
-class AggregatorConfig(object):
-    """
-    A class-based view for accessing and mutating the configuration of the aggregator.
-    """
-
-    def __init__(self, request):
-        """
-        Initialises the instance of the view with the request argument.
-
-        :param request: client's call request
-        """
-
-        self.request = request
-
-    @view_config(request_method="GET")
-    def get(self):
-        """
-        A GET API call for the configuration of the aggregator.
-
-        :return: A JSON response with the configuration of the aggregator.
-        """
-
-        aggregator_config_data = self.request.registry.settings[CONF_OBJECT]  # fetch the configuration object
-        if aggregator_config_data is None:
-            raise HTTPBadRequest("Aggregator has not been configured, yet. Send a PUT request to /aggregator/config with a JSON body of the configuration.")
-
-        config = {key: aggregator_config_data[AGGREGATOR_CONFIG_SECTION][key] for key in CONFIG_ATTRIBUTES}  # extract a json value containing the config attributes
-        config['aggregator_report_period'] = int(config['aggregator_report_period'])
-
-        return config
-
-    @view_config(request_method="PUT")
-    def put(self):
-        """
-        A PUT API call for the status of the aggregator.
-
-        :return: A JSON response to the PUT call - essentially with the new configured data and comment of the state of the aggregator
-        :raises HTTPBadRequest: if request body is not a valid JSON for the configurator
-        """
-
-        try:
-            new_config = self.request.body.decode(self.request.charset)
-            new_config = validate_config_content(new_config)  # validate the content and receive a json dictionary object
-        except AssertionError as e:
-            raise HTTPBadRequest("Bad request content. Configuration format is incorrect: {0}".format(e.args))
-
-        conf = self.request.registry.settings[CONF_OBJECT]
-        if conf is None:
-            conf = configparser.ConfigParser()
-            conf[AGGREGATOR_CONFIG_SECTION] = {}
-            self.request.registry.settings[CONF_OBJECT] = conf
-            old_config = {}
-        else:
-            # save the old configuration before updating so that it can be compared to the new one and checked for malformed state
-            old_config = {attribute: conf[AGGREGATOR_CONFIG_SECTION][attribute] for attribute in CONFIG_ATTRIBUTES}
-            old_config['aggregator_report_period'] = int(old_config['aggregator_report_period'])
-
-        for attribute in CONFIG_ATTRIBUTES:
-            conf[AGGREGATOR_CONFIG_SECTION][attribute] = str(new_config.get(attribute))  # update the configuration attributes
-
-        # if configuration is not already malformed, check whether the configuration is updated (changed in any way), if so (and the aggregator is running), malformed state is detected
-        if not self.request.registry.settings[MALFORMED_FLAG]:
-            malformed = old_config != new_config and AggregatorController.is_process_running(self.request.registry.settings.get(PROCESS_ATTRIBUTE))
-            self.request.registry.settings[MALFORMED_FLAG] = malformed
-            if malformed:
-                new_config[MALFORMED_FLAG] = True
-                new_config[COMMENT_ATTRIBUTE] = COMMENT_VALUE
-
-        self._write_conf_file()  # save the updated configuration to conf file
-        return new_config
-
-    def _write_conf_file(self):
-        """
-        Writes the configuration settings of the aggregator to a file with path stored at CONF_FILE_ATTRIBUTE
-        """
-
-        conf = self.request.registry.settings[CONF_OBJECT]
-        conf_file_path = self.request.registry.settings[CONF_FILE_ATTRIBUTE]
-        os.makedirs(os.path.dirname(conf_file_path), exist_ok=True)
-
-        log.info("Saving configuration to file {0}.".format(conf_file_path))
-        with open(conf_file_path, 'w') as configfile:
-            log.info("Opened configuration file {0}.".format(conf_file_path))
-            conf.write(configfile)
-        log.info("Successfully saved configuration to file {0}.".format(conf_file_path))
-
-
-@view_defaults(route_name='aggregator_controller', renderer='json')
-class AggregatorController(object):
-
-    """
-    A class-based view for controlling the aggregator.
-    """
-
-    def __init__(self, request):
-        """
-        Initialises the instance of the view with the request argument.
-
-        :param request: client's call request
-        """
-
-        self.request = request
-
-    @view_config(request_method="GET")
-    def get(self):
-        """
-        A GET API call for the status of the aggregator - running or not.
-
-        :return: A JSON response with the status of the aggregator.
-        """
-
-        aggregator_data = self.request.registry.settings
-        aggregator_process = aggregator_data.get(PROCESS_ATTRIBUTE)
-        aggregator_running = self.is_process_running(aggregator_process)
-
-        config = {RUNNING_FLAG: aggregator_running}
-
-        if aggregator_data[MALFORMED_FLAG] and aggregator_running:
-            config[MALFORMED_FLAG] = True
-            config[COMMENT_ATTRIBUTE] = COMMENT_VALUE
-
-        return config
-
-    @view_config(request_method="PUT")
-    def put(self):
-        """
-        A PUT API call for the status of the aggregator.
-
-        :return: A JSON response to the PUT call - essentially saying whether the aggregator is running or not
-        :raises HTTPBadRequest: if request body is not a valid JSON for the controller
-        """
-
-        content = self.request.body.decode(self.request.charset)
-
-        try:
-            content = validate_action_content(content)
-
-            conf = self.request.registry.settings[CONF_OBJECT]
-            if conf is None:
-                raise HTTPBadRequest("You must configure the aggregator before controlling it. Send a PUT request to /aggregator/config with a JSON body of the configuration.")
-
-            aggregator_config = {attribute: conf[AGGREGATOR_CONFIG_SECTION][attribute] for attribute in CONFIG_ATTRIBUTES}
-            aggregator_config['aggregator_report_period'] = int(aggregator_config['aggregator_report_period'])
-
-            action = content['action']
-
-            aggregator_running = self.is_process_running(self.request.registry.settings.get(PROCESS_ATTRIBUTE))
-            if action == 'start':
-                if not aggregator_running:
-                    process = self.start_aggregator(aggregator_config)
-                    aggregator_running = True
-                    self.request.registry.settings[PROCESS_ATTRIBUTE] = process
-                    self.request.registry.settings[MALFORMED_FLAG] = False
-            elif action == 'stop':
-                self.stop_aggregator(self.request.registry.settings.get(PROCESS_ATTRIBUTE))
-                aggregator_running = False
-                self.request.registry.settings[PROCESS_ATTRIBUTE] = None
-                self.request.registry.settings[MALFORMED_FLAG] = False
-            elif action == 'restart':
-                self.stop_aggregator(self.request.registry.settings.get(PROCESS_ATTRIBUTE))
-                process = self.start_aggregator(aggregator_config)
-                aggregator_running = True
-                self.request.registry.settings[PROCESS_ATTRIBUTE] = process
-                self.request.registry.settings[MALFORMED_FLAG] = False
-
-            return {RUNNING_FLAG: aggregator_running}
-
-        except AssertionError:
-            raise HTTPBadRequest('Bad request content - must be in JSON format: {"action": value}, where value is "start", "stop" or "restart".')
-
-    @staticmethod
-    def start_aggregator(config):
-        """
-        An auxiliary method to start the aggregator.
-
-        :param config: the configuration containing the arguments for the aggregator
-        :return: the process object of the started aggregator script
-        """
-
-        python_interpreter = sys.executable
-        command = [python_interpreter, '-m', 'clmcservice.aggregation.aggregator', '--period', str(config.get('aggregator_report_period')), '--database',
-                   config.get('aggregator_database_name'), '--url', config.get('aggregator_database_url')]
-        process = Popen(command)
-
-        log.info("\nStarted aggregator process with PID: {0}\n".format(process.pid))
-
-        return process
-
-    @staticmethod
-    def stop_aggregator(process):
-        """
-        An auxiliary method to stop the aggregator.
-
-        :param process: the process to terminate
-        """
-
-        # check if the process is started
-        if AggregatorController.is_process_running(process):
-            process.terminate()
-            log.info("\nStopped aggregator process with PID: {0}\n".format(process.pid))
-
-    @staticmethod
-    def is_process_running(process):
-        """
-        Checks if a process is running.
-
-        :param process: the Popen object to check
-        :return: True if running, False otherwise
-        """
-
-        # check if the process is started before trying to terminate it - process.poll() only returns something if the process has terminated, hence we check for a None value
-        return process is not None and process.poll() is None
diff --git a/src/service/clmcservice/alertsapi/alerts_specification_schema.py b/src/service/clmcservice/alertsapi/alerts_specification_schema.py
index 74a5170d96bfe06832a8522489df5765d0a7ae8d..a09bee88681661509cbfe3911d29d4b75649206f 100644
--- a/src/service/clmcservice/alertsapi/alerts_specification_schema.py
+++ b/src/service/clmcservice/alertsapi/alerts_specification_schema.py
@@ -62,7 +62,7 @@ URL_REGEX = compile(
     IGNORECASE)
 
 # Global tags allowed to be used for filtering in the trigger condition
-CLMC_INFORMATION_MODEL_GLOBAL_TAGS = ("sfc", "sfci", "sf_package", "sf", "sf_endpoint", "host", "location")
+CLMC_INFORMATION_MODEL_GLOBAL_TAGS = ("sfc", "sfci", "sfp", "sf", "sfe", "server", "location")
 
 ALERTS_SPECIFICATION_SCHEMA = Schema({
     "tosca_definitions_version": And(str, lambda v: v == "tosca_simple_profile_for_nfv_1_0_0"),
diff --git a/src/service/clmcservice/alertsapi/views.py b/src/service/clmcservice/alertsapi/views.py
index d5d41b2a560397f493cad9349b3b5c0c2d58564c..33bdfb97ca800cfe2186de3117359f6ee01846ea 100644
--- a/src/service/clmcservice/alertsapi/views.py
+++ b/src/service/clmcservice/alertsapi/views.py
@@ -194,8 +194,8 @@ class AlertsConfigurationAPI(object):
                 if "resource_type" in trigger.trigger_tpl["condition"]:
                     tags = condition["resource_type"]
                     # make sure alert tasks are executing with queries for the given sfc and sfc instance
-                    # tags["sfc"] = sfc TODO uncomment this line when we update telegraf to name db after sfc
-                    # tags["sfci"] = sfc_instance TODO uncomment this line when telegraf global tags are updated, currently we have sfc_i instead of sfci
+                    tags["sfc"] = sfc
+                    tags["sfci"] = sfc_instance
 
                     # NOTE: if the template has its where clause defined as lambda (stream templates), then use "==" as comparison operator,
                     #       else if the template's where clause is defined as a string (batch templates), then use "=" as comparison operator
diff --git a/src/service/clmcservice/graphapi/conftest.py b/src/service/clmcservice/graphapi/conftest.py
index e36ac06f72af6e96dc17dcb2ec5cee5efb611aee..1e3708d09b0b63652c81b48f8cd308b459fe4c74 100644
--- a/src/service/clmcservice/graphapi/conftest.py
+++ b/src/service/clmcservice/graphapi/conftest.py
@@ -130,7 +130,10 @@ def db_testing_data():
 
     global network_config
 
-    test_db_name = "TestInfluxDB"
+    test_sfc_name = "test_sfc"
+    test_sfc_instance_1_name = "test_sfc_premium"
+    test_sfc_instance_2_name = "test_sfc_non_premium"
+    test_db_name = test_sfc_name
 
     # ASSUMES both Influx and Neo4j are running on localhost with default ports
     influx = InfluxDBClient(host="localhost", port=8086, timeout=10)
@@ -155,65 +158,65 @@ def db_testing_data():
 
     # nginx data to report to influx
     data = [
-        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 5, 20, 1500, 15000, 1528385860),
-        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 8, 35, 1000, 11000, 1528385860),
-        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 7, 15, 2300, 10000, 1528389860),
-        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 10, 23, 98000, 1200, 1528389860),
-        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 12, 17, 2000, 7500, 1528395860),
-        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 15, 11, 1300, 6700, 1528395860),
-        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 17, 23, 3000, 8300, 1528485860),
-        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 19, 24, 76000, 1200, 1528485860),
-        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 11, 16, 2500, 7500, 1528545860),
-        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 20, 18, 1700, 12000, 1528545860)
+        ("nginx_1_ep1", "DC4", "nginx", "nginx_1", test_sfc_name, test_sfc_instance_1_name, 5, 20, 1500, 15000, 1528385860),
+        ("nginx_1_ep2", "DC6", "nginx", "nginx_1", test_sfc_name, test_sfc_instance_1_name, 8, 35, 1000, 11000, 1528385860),
+        ("nginx_1_ep1", "DC4", "nginx", "nginx_1", test_sfc_name, test_sfc_instance_1_name, 7, 15, 2300, 10000, 1528389860),
+        ("nginx_1_ep2", "DC6", "nginx", "nginx_1", test_sfc_name, test_sfc_instance_1_name, 10, 23, 98000, 1200, 1528389860),
+        ("nginx_1_ep1", "DC4", "nginx", "nginx_1", test_sfc_name, test_sfc_instance_1_name, 12, 17, 2000, 7500, 1528395860),
+        ("nginx_1_ep2", "DC6", "nginx", "nginx_1", test_sfc_name, test_sfc_instance_1_name, 15, 11, 1300, 6700, 1528395860),
+        ("nginx_1_ep1", "DC4", "nginx", "nginx_1", test_sfc_name, test_sfc_instance_1_name, 17, 23, 3000, 8300, 1528485860),
+        ("nginx_1_ep2", "DC6", "nginx", "nginx_1", test_sfc_name, test_sfc_instance_1_name, 19, 24, 76000, 1200, 1528485860),
+        ("nginx_1_ep1", "DC4", "nginx", "nginx_1", test_sfc_name, test_sfc_instance_1_name, 11, 16, 2500, 7500, 1528545860),
+        ("nginx_1_ep2", "DC6", "nginx", "nginx_1", test_sfc_name, test_sfc_instance_1_name, 20, 18, 1700, 12000, 1528545860)
     ]
     influx.write_points([
         {"measurement": "nginx",
-         "tags": {"host": host, "ipendpoint": endpoint, "location": location, "sf": sf, "sf_i": sf_i, "sfc": sfc, "sfc_i": sfc_i, "sr": sr},
+         "tags": {"server": location, "sfe": sfe, "location": location, "sfp": sfp, "sf": sf, "sfc": sfc, "sfci": sfci},
          "fields": {"requests": num_requests, "avg_processing_time": processing_time, "avg_request_size": request_size, "avg_response_size": response_size},
          "time": timestamp * 10 ** 9
-         } for host, endpoint, location, sf, sf_i, sfc, sfc_i, sr, num_requests, processing_time, request_size, response_size, timestamp in data
+         } for sfe, location, sfp, sf, sfc, sfci, num_requests, processing_time, request_size, response_size, timestamp in data
     ])
 
     # minio data to report to influx
     data = [
-        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 12, 86, 101000, 4700, 1528386860),
-        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 15, 75, 96000, 6300, 1528386860),
-        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 7, 105, 5200, 89200, 1528388860),
-        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 12, 60, 76900, 2100, 1528388860),
-        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 11, 121, 99500, 3500, 1528410860),
-        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 12, 154, 2700, 111000, 1528410860),
-        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 14, 84, 1100, 4300, 1528412860),
-        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 5, 45, 1200, 3200, 1528412860),
-        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 7, 63, 87000, 2000, 1528414860),
-        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 16, 86, 3100, 94000, 1528414860)
+        ("minio_1_ep1", "DC4", "minio", "minio_1", test_sfc_name, test_sfc_instance_1_name, 12, 86, 101000, 4700, 1528386860),
+        ("minio_2_ep1", "DC5", "minio", "minio_2", test_sfc_name, test_sfc_instance_2_name, 15, 75, 96000, 6300, 1528386860),
+        ("minio_1_ep1", "DC4", "minio", "minio_1", test_sfc_name, test_sfc_instance_1_name, 7, 105, 5200, 89200, 1528388860),
+        ("minio_2_ep1", "DC5", "minio", "minio_2", test_sfc_name, test_sfc_instance_2_name, 12, 60, 76900, 2100, 1528388860),
+        ("minio_1_ep1", "DC4", "minio", "minio_1", test_sfc_name, test_sfc_instance_1_name, 11, 121, 99500, 3500, 1528410860),
+        ("minio_2_ep1", "DC5", "minio", "minio_2", test_sfc_name, test_sfc_instance_2_name, 12, 154, 2700, 111000, 1528410860),
+        ("minio_1_ep1", "DC4", "minio", "minio_1", test_sfc_name, test_sfc_instance_1_name, 14, 84, 1100, 4300, 1528412860),
+        ("minio_2_ep1", "DC5", "minio", "minio_2", test_sfc_name, test_sfc_instance_2_name, 5, 45, 1200, 3200, 1528412860),
+        ("minio_1_ep1", "DC4", "minio", "minio_1", test_sfc_name, test_sfc_instance_1_name, 7, 63, 87000, 2000, 1528414860),
+        ("minio_2_ep1", "DC5", "minio", "minio_2", test_sfc_name, test_sfc_instance_2_name, 16, 86, 3100, 94000, 1528414860)
     ]
     influx.write_points([
         {"measurement": "minio_http",
-         "tags": {"host": host, "ipendpoint": endpoint, "location": location, "sf": sf, "sf_i": sf_i, "sfc": sfc, "sfc_i": sfc_i, "sr": sr},
+         "tags": {"server":  location, "sfe": sfe, "location": location, "sfp": sfp, "sf": sf, "sfc": sfc, "sfci": sfci},
          "fields": {"total_requests_count": num_requests, "total_processing_time": processing_time, "total_requests_size": request_size, "total_response_size": response_size},
          "time": timestamp * 10 ** 9
-         } for host, endpoint, location, sf, sf_i, sfc, sfc_i, sr, num_requests, processing_time, request_size, response_size, timestamp in data
+         } for sfe, location, sfp, sf, sfc, sfci, num_requests, processing_time, request_size, response_size, timestamp in data
     ])
 
     # apache data to report to influx
     data = [
-        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 15, 1400, 15600, 1528386860),
-        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 17, 2200, 11200, 1528388860),
-        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 19, 700, 5700, 1528410860),
-        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 24, 1900, 4300, 1528412860),
-        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 13, 1200, 2500, 1528414860),
+        ("apache_1_ep1", "DC5", "apache", "apache_1", test_sfc_name, test_sfc_instance_2_name, 15, 1400, 15600, 1528386860),
+        ("apache_1_ep1", "DC5", "apache", "apache_1", test_sfc_name, test_sfc_instance_2_name, 17, 2200, 11200, 1528388860),
+        ("apache_1_ep1", "DC5", "apache", "apache_1", test_sfc_name, test_sfc_instance_2_name, 19, 700, 5700, 1528410860),
+        ("apache_1_ep1", "DC5", "apache", "apache_1", test_sfc_name, test_sfc_instance_2_name, 24, 1900, 4300, 1528412860),
+        ("apache_1_ep1", "DC5", "apache", "apache_1", test_sfc_name, test_sfc_instance_2_name, 13, 1200, 2500, 1528414860),
     ]
     influx.write_points([
         {"measurement": "apache",
-         "tags": {"host": host, "ipendpoint": endpoint, "location": location, "sf": sf, "sf_i": sf_i, "sfc": sfc, "sfc_i": sfc_i, "sr": sr},
+         "tags": {"server": location, "sfe": sfe, "location": location, "sfp": sfp, "sf": sf, "sfc": sfc, "sfci": sfci},
          "fields": {"avg_processing_time": processing_time, "avg_request_size": request_size, "avg_response_size": response_size},
          "time": timestamp * 10 ** 9
-         } for host, endpoint, location, sf, sf_i, sfc, sfc_i, sr, processing_time, request_size, response_size, timestamp in data
+         } for sfe, location, sfp, sf, sfc, sfci, processing_time, request_size, response_size, timestamp in data
     ])
 
-    yield from_timestamp, to_timestamp, test_db_name, graph
+    yield from_timestamp, to_timestamp, graph
 
     # clean up after the test is over - delete the test databases and clear up the graph
     influx.drop_database("CLMCMetrics")
-    influx.drop_database("TestInfluxDB")
+    influx.drop_database(test_db_name)
     graph.delete_all()
diff --git a/src/service/clmcservice/graphapi/tests.py b/src/service/clmcservice/graphapi/tests.py
index 9dcd23eaa93819f0743dcab77756969ac53d9fe5..c26aa2c1bf48291fac7b67a6d63bd7d50f95b2c7 100644
--- a/src/service/clmcservice/graphapi/tests.py
+++ b/src/service/clmcservice/graphapi/tests.py
@@ -54,25 +54,21 @@ class TestGraphAPI(object):
     @pytest.mark.parametrize("body, from_timestamp, to_timestamp, error_msg", [
         (None, None, None, "A bad request error must have been raised in case of missing request body."),
         ('{}', 12341412, 1234897, "A bad request error must have been raised in case of invalid request body."),
-        ('{"database": "CLMCMetrics", "retention_policy": "autogen", "service_function_chain_instance": "sfc_i"}', 12341412, 1234897, "A bad request error must have been raised in case of invalid request body."),
-        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": "{invalid_json}"}', 1528386860, 1528389860, "A bad request error must have been raised in case of invalid request body."),
-        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": ["nginx", "minio"]}', 1528386860, 1528389860, "A bad request error must have been raised in case of invalid request body."),
-        ('{"retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
-         1528386860, 1528389860, "A bad request error must have been raised in case of missing database value in the request body"),
-        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_id", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
-         1528386860, 1528389860, "A bad request error must have been raised in case of invalid sfc_i ID in the request body"),
-        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "testsfc1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
-         1528386860, 1528389860, "A bad request error must have been raised in case of invalid sfc_i ID in the request body"),
-        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+        ('{"service_function_chain": "sfc", "service_function_chain_instance": "sfci"}', 12341412, 1234897, "A bad request error must have been raised in case of invalid request body."),
+        ('{"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": "{invalid_json}"}', 1528386860, 1528389860, "A bad request error must have been raised in case of invalid request body."),
+        ('{"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": ["nginx", "minio"]}', 1528386860, 1528389860, "A bad request error must have been raised in case of invalid request body."),
+        ('{"service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         1528386860, 1528389860, "A bad request error must have been raised in case of missing service function chain value in the request body"),
+        ('{"service_function_chain": "sfc", "service_function_chain_instance": "sfcinstance", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         1528386860, 1528389860, "A bad request error must have been raised in case of invalid sfci ID in the request body"),
+        ('{"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
          "not a timestamp", "not a timestamp", "A bad request error must have been raised in case of invalid URL parameters."),
-        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+        ('{"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
          None, "not a timestamp", "A bad request error must have been raised in case of invalid URL parameters."),
-        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+        ('{"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
          2131212, None, "A bad request error must have been raised in case of invalid URL parameters."),
-        ('{"database": "DB-not-exist", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+        ('{"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
          2131212, 2131212, "A bad request error must have been raised in case of a non-existing database."),
-        ('{"database": "TestInfluxDB", "retention_policy": "autogen-invalid", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
-         2131212, 2131212, "A bad request error must have been raised in case of a non-existing retention policy."),
     ])
     def test_build_error_handling(self, body, from_timestamp, to_timestamp, error_msg):
         """
@@ -108,7 +104,7 @@ class TestGraphAPI(object):
 
         global graph_1_id, graph_2_id  # these variables are used to store the ID of the graphs that were created during the execution of this test method; they are reused later when testing the delete method
 
-        from_timestamp, to_timestamp, test_db_name, graph_db = db_testing_data
+        from_timestamp, to_timestamp, graph_db = db_testing_data
 
         dc_nodes = set([node["name"] for node in graph_db.nodes.match("ComputeNode")])
         assert dc_nodes == set("DC" + str(i) for i in range(1, 7)), "Compute nodes must have been created by the db_testing_data fixture"
@@ -120,7 +116,7 @@ class TestGraphAPI(object):
                                         "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"},
                                  apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)",
                                          "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"})
-        body = dumps(dict(database=test_db_name, retention_policy="autogen", service_function_chain_instance="sfc_1", service_functions=service_functions))
+        body = dumps(dict(service_function_chain="sfc", service_function_chain_instance="sfc_1", service_functions=service_functions))
         request = testing.DummyRequest()
         request.params["from_timestamp"] = 12341412
         request.params["to_timestamp"] = 12341412
@@ -137,7 +133,7 @@ class TestGraphAPI(object):
                                         "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"},
                                  minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
                                         "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"})
-        build_json_body = dict(database=test_db_name, retention_policy="autogen", service_function_chain_instance="test_sfc1_1", service_functions=service_functions)
+        build_json_body = dict(service_function_chain="test_sfc", service_function_chain_instance="test_sfc_premium", service_functions=service_functions)
         body = dumps(build_json_body)
         request = testing.DummyRequest()
         request.params["from"] = from_timestamp
@@ -157,12 +153,12 @@ class TestGraphAPI(object):
         assert sf_i_names == {"nginx_1", "minio_1"}, "The graph must contain 2 service function instances - nginx_1 and minio_1"
         endpoints = set([node["name"] for node in graph_db.nodes.match("Endpoint", uuid=request_id)])
         assert endpoints == {"minio_1_ep1", "nginx_1_ep1", "nginx_1_ep2"}, "The graph must contain 3 endpoints - minio_1_ep1, nginx_1_ep1, nginx_1_ep2"
-        sfc_i_names = set([node["name"] for node in graph_db.nodes.match("ServiceFunctionChainInstance")])
-        assert sfc_i_names == {"test_sfc1_1"}, "The graph must contain 1 service function chain instance - test_sfc1_1"
+        sfci_names = set([node["name"] for node in graph_db.nodes.match("ServiceFunctionChainInstance")])
+        assert sfci_names == {"test_sfc_premium"}, "The graph must contain 1 service function chain instance - test_sfc_premium"
         sfc_names = set([node["name"] for node in graph_db.nodes.match("ServiceFunctionChain")])
-        assert sfc_names == {"test_sfc1"}, "The graph must contain 1 service function chain - test_sfc1"
+        assert sfc_names == {"test_sfc"}, "The graph must contain 1 service function chain - test_sfc"
 
-        reference_node = graph_db.nodes.match("Reference", uuid=request_id, sfc_i="test_sfc1_1", sfc="test_sfc1").first()
+        reference_node = graph_db.nodes.match("Reference", uuid=request_id, sfci="test_sfc_premium", sfc="test_sfc").first()
         assert reference_node is not None and reference_node["from"] == from_timestamp * 10**9 and reference_node["to"] == to_timestamp * 10**9, "Reference node must have been created"
 
         # check the appropriate edges have been created
@@ -175,11 +171,11 @@ class TestGraphAPI(object):
                 ("nginx_1", "ServiceFunctionInstance", "nginx_1_ep1", "Endpoint", "realisedBy"),
                 ("nginx_1", "ServiceFunctionInstance", "nginx_1_ep2", "Endpoint", "realisedBy"),
                 ("minio_1", "ServiceFunctionInstance", "minio", "ServiceFunction", "instanceOf"),
-                ("nginx_1", "ServiceFunctionInstance", "test_sfc1_1", "ServiceFunctionChainInstance", "utilizedBy"),
-                ("minio_1", "ServiceFunctionInstance", "test_sfc1_1", "ServiceFunctionChainInstance", "utilizedBy"),
-                ("nginx", "ServiceFunction", "test_sfc1", "ServiceFunctionChain", "utilizedBy"),
-                ("minio", "ServiceFunction", "test_sfc1", "ServiceFunctionChain", "utilizedBy"),
-                ("test_sfc1_1", "ServiceFunctionChainInstance", "test_sfc1", "ServiceFunctionChain", "instanceOf"),
+                ("nginx_1", "ServiceFunctionInstance", "test_sfc_premium", "ServiceFunctionChainInstance", "utilizedBy"),
+                ("minio_1", "ServiceFunctionInstance", "test_sfc_premium", "ServiceFunctionChainInstance", "utilizedBy"),
+                ("nginx", "ServiceFunction", "test_sfc", "ServiceFunctionChain", "utilizedBy"),
+                ("minio", "ServiceFunction", "test_sfc", "ServiceFunctionChain", "utilizedBy"),
+                ("test_sfc_premium", "ServiceFunctionChainInstance", "test_sfc", "ServiceFunctionChain", "instanceOf"),
             ), graph_db, request_id
         )
 
@@ -196,7 +192,7 @@ class TestGraphAPI(object):
                                         "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"},
                                  apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)",
                                          "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"})
-        build_json_body = dict(database=test_db_name, retention_policy="autogen", service_function_chain_instance="test_sfc2_1", service_functions=service_functions)
+        build_json_body = dict(service_function_chain="test_sfc", service_function_chain_instance="test_sfc_non_premium", service_functions=service_functions)
         body = dumps(build_json_body)
         request = testing.DummyRequest()
         request.params["from"] = from_timestamp
@@ -218,10 +214,10 @@ class TestGraphAPI(object):
         for ep in ("minio_2_ep1", "apache_1_ep1"):
             assert graph_db.nodes.match("Endpoint", name=ep, uuid=request_id).first() is not None, "Endpoint {0} must have been added to the graph".format(ep)
 
-        assert graph_db.nodes.match("ServiceFunctionChainInstance", name="test_sfc2_1").first() is not None, "Service function chain instance test_sfc2_1 must have been added to the graph"
-        assert graph_db.nodes.match("ServiceFunctionChain", name="test_sfc2").first() is not None, "Service function chain test_sfc2 must have been added to the graph"
+        assert graph_db.nodes.match("ServiceFunctionChainInstance", name="test_sfc_non_premium").first() is not None, "Service function chain instance test_sfc_non_premium must have been added to the graph"
+        assert graph_db.nodes.match("ServiceFunctionChain", name="test_sfc").first() is not None, "Service function chain test_sfc must have been added to the graph"
 
-        reference_node = graph_db.nodes.match("Reference", uuid=request_id, sfc_i="test_sfc2_1", sfc="test_sfc2").first()
+        reference_node = graph_db.nodes.match("Reference", uuid=request_id, sfci="test_sfc_non_premium", sfc="test_sfc").first()
         assert reference_node is not None and reference_node["from"] == from_timestamp * 10**9 and reference_node["to"] == to_timestamp * 10**9, "Reference node must have been created"
 
         # check the appropriate edges have been created
@@ -233,11 +229,11 @@ class TestGraphAPI(object):
                 ("apache_1", "ServiceFunctionInstance", "apache_1_ep1", "Endpoint", "realisedBy"),
                 ("minio_2", "ServiceFunctionInstance", "minio", "ServiceFunction", "instanceOf"),
                 ("apache_1", "ServiceFunctionInstance", "apache", "ServiceFunction", "instanceOf"),
-                ("minio_2", "ServiceFunctionInstance", "test_sfc2_1", "ServiceFunctionChainInstance", "utilizedBy"),
-                ("apache_1", "ServiceFunctionInstance", "test_sfc2_1", "ServiceFunctionChainInstance", "utilizedBy"),
-                ("minio", "ServiceFunction", "test_sfc2", "ServiceFunctionChain", "utilizedBy"),
-                ("apache", "ServiceFunction", "test_sfc2", "ServiceFunctionChain", "utilizedBy"),
-                ("test_sfc2_1", "ServiceFunctionChainInstance", "test_sfc2", "ServiceFunctionChain", "instanceOf")
+                ("minio_2", "ServiceFunctionInstance", "test_sfc_non_premium", "ServiceFunctionChainInstance", "utilizedBy"),
+                ("apache_1", "ServiceFunctionInstance", "test_sfc_non_premium", "ServiceFunctionChainInstance", "utilizedBy"),
+                ("minio", "ServiceFunction", "test_sfc", "ServiceFunctionChain", "utilizedBy"),
+                ("apache", "ServiceFunction", "test_sfc", "ServiceFunctionChain", "utilizedBy"),
+                ("test_sfc_non_premium", "ServiceFunctionChainInstance", "test_sfc", "ServiceFunctionChain", "instanceOf")
             ), graph_db, request_id
         )
 
@@ -258,7 +254,7 @@ class TestGraphAPI(object):
 
         global graph_1_id, graph_2_id
 
-        from_timestamp, to_timestamp, test_db_name, graph_db = db_testing_data
+        from_timestamp, to_timestamp, graph_db = db_testing_data
 
         request = testing.DummyRequest()
         request.matchdict["graph_id"] = "invalid_graph_id"
@@ -285,6 +281,8 @@ class TestGraphAPI(object):
         assert set([node["name"] for node in graph_db.nodes.match("ComputeNode")]) == set(["DC" + str(i) for i in range(1, 7)]), "Compute nodes must not be deleted"
         assert set([node["name"] for node in graph_db.nodes.match("ServiceFunctionInstance")]) == {"nginx_1", "apache_1", "minio_1", "minio_2"}, "Service function instances must not be deleted."
         assert set([node["name"] for node in graph_db.nodes.match("ServiceFunction")]) == {"nginx", "minio", "apache"}, "Service functions must not be deleted"
+        assert set([node["name"] for node in graph_db.nodes.match("ServiceFunctionChainInstance")]) == {"test_sfc_premium", "test_sfc_non_premium"}, "Service function chain instances must not be deleted"
+        assert set([node["name"] for node in graph_db.nodes.match("ServiceFunctionChain")]) == {"test_sfc"}, "Service function chains must not be deleted"
 
     @pytest.mark.parametrize("graph_id, endpoint, compute_node, error_type, error_msg", [
         ('e8cd4768-47dd-48cd-9c74-7f8926ddbad8', None, None, HTTPBadRequest, "HTTP Bad Request must be thrown in case of missing or invalid url parameters"),
@@ -324,14 +322,14 @@ class TestGraphAPI(object):
-        :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data, test database name and the graph db client object (this is a fixture from conftest)
+        :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data and the graph db client object (this is a fixture from conftest)
         """
 
-        from_timestamp, to_timestamp, test_db_name, graph_db = db_testing_data
+        from_timestamp, to_timestamp, graph_db = db_testing_data
 
         # create a graph to use for RTT test by using the build API endpoint
         service_functions = dict(nginx={"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)",
                                         "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"},
                                  minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
                                         "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"})
-        build_json_body = dict(database=test_db_name, retention_policy="autogen", service_function_chain_instance="test_sfc1_1", service_functions=service_functions)
+        build_json_body = dict(service_function_chain="test_sfc", service_function_chain_instance="test_sfc_premium", service_functions=service_functions)
         body = dumps(build_json_body)
         request = testing.DummyRequest()
         request.params["from"] = from_timestamp
@@ -379,9 +377,9 @@ class TestGraphAPI(object):
 
         # go through the set of input/output (expected) parameters and assert actual results match with expected ones
         for dc, endpoint, forward_latencies, reverse_latencies, response_time, request_size, response_size, rtt, global_tags in (
-            ("DC6", "nginx_1_ep2", [], [], 22.2, 35600, 6420, 22.2, {"location": "DC6", "sr": "sr6", "ipendpoint": "nginx_1_ep2", "host": "host2", "sfc": "test_sfc1", "sfc_i": "test_sfc1_1", "sf": "nginx", "sf_i": "nginx_1"}),
-            ("DC2", "nginx_1_ep2", [11, 15, 4.5], [5.5, 13, 7.5], 22.2, 35600, 6420, 78, {"location": "DC6", "sr": "sr6", "ipendpoint": "nginx_1_ep2", "host": "host2", "sfc": "test_sfc1", "sfc_i": "test_sfc1_1", "sf": "nginx", "sf_i": "nginx_1"}),
-            ("DC3", "nginx_1_ep1", [12.5], [7.5], 18.2, 2260, 9660, 38, {"location": "DC4", "sr": "sr4", "ipendpoint": "nginx_1_ep1", "host": "host1", "sfc": "test_sfc1", "sfc_i": "test_sfc1_1", "sf": "nginx", "sf_i": "nginx_1"})
+            ("DC6", "nginx_1_ep2", [], [], 22.2, 35600, 6420, 22.2, {"location": "DC6", "sfe": "nginx_1_ep2", "server": "DC6", "sfc": "test_sfc", "sfci": "test_sfc_premium", "sfp": "nginx", "sf": "nginx_1"}),
+            ("DC2", "nginx_1_ep2", [11, 15, 4.5], [5.5, 13, 7.5], 22.2, 35600, 6420, 78, {"location": "DC6", "sfe": "nginx_1_ep2", "server": "DC6", "sfc": "test_sfc", "sfci": "test_sfc_premium", "sfp": "nginx", "sf": "nginx_1"}),
+            ("DC3", "nginx_1_ep1", [12.5], [7.5], 18.2, 2260, 9660, 38, {"location": "DC4", "sfe": "nginx_1_ep1", "server": "DC4", "sfc": "test_sfc", "sfci": "test_sfc_premium", "sfp": "nginx", "sf": "nginx_1"})
         ):
             request = testing.DummyRequest()
             request.matchdict["graph_id"] = request_id
@@ -399,7 +397,7 @@ class TestGraphAPI(object):
                                         "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"},
                                  apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)",
                                          "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"})
-        build_json_body = dict(database=test_db_name, retention_policy="autogen", service_function_chain_instance="test_sfc2_1", service_functions=service_functions)
+        build_json_body = dict(service_function_chain="test_sfc", service_function_chain_instance="test_sfc_non_premium", service_functions=service_functions)
         body = dumps(build_json_body)
         request = testing.DummyRequest()
         request.params["from"] = from_timestamp
@@ -413,10 +411,10 @@ class TestGraphAPI(object):
 
         # go through the set of input/output (expected) parameters and assert actual results match with expected ones
         for dc, endpoint, forward_latencies, reverse_latencies, response_time, request_size, response_size, rtt, global_tags in (
-            ("DC5", "apache_1_ep1", [], [], 17.6, 1480, 7860, 17.6, {"location": "DC5", "sr": "sr5", "ipendpoint": "apache_1_ep1", "host": "host5", "sfc": "test_sfc2", "sfc_i": "test_sfc2_1", "sf": "apache", "sf_i": "apache_1"}),
-            ("DC5", "minio_2_ep1", [], [], 7, 2998, 3610, 7, {"location": "DC5", "sr": "sr5", "ipendpoint": "minio_2_ep1", "host": "host4", "sfc": "test_sfc2", "sfc_i": "test_sfc2_1", "sf": "minio", "sf_i": "minio_2"}),
-            ("DC3", "apache_1_ep1", [10, 15], [13, 9], 17.6, 1480, 7860, 64, {"location": "DC5", "sr": "sr5", "ipendpoint": "apache_1_ep1", "host": "host5", "sfc": "test_sfc2", "sfc_i": "test_sfc2_1", "sf": "apache", "sf_i": "apache_1"}),
-            ("DC2", "minio_2_ep1", [11, 15], [13, 7.5], 7, 2998, 3610, 53, {"location": "DC5", "sr": "sr5", "ipendpoint": "minio_2_ep1", "host": "host4", "sfc": "test_sfc2", "sfc_i": "test_sfc2_1", "sf": "minio", "sf_i": "minio_2"})
+            ("DC5", "apache_1_ep1", [], [], 17.6, 1480, 7860, 17.6, {"location": "DC5", "sfe": "apache_1_ep1", "server": "DC5", "sfc": "test_sfc", "sfci": "test_sfc_non_premium", "sfp": "apache", "sf": "apache_1"}),
+            ("DC5", "minio_2_ep1", [], [], 7, 2998, 3610, 7, {"location": "DC5", "sfe": "minio_2_ep1", "server": "DC5", "sfc": "test_sfc", "sfci": "test_sfc_non_premium", "sfp": "minio", "sf": "minio_2"}),
+            ("DC3", "apache_1_ep1", [10, 15], [13, 9], 17.6, 1480, 7860, 64, {"location": "DC5", "sfe": "apache_1_ep1", "server": "DC5", "sfc": "test_sfc", "sfci": "test_sfc_non_premium", "sfp": "apache", "sf": "apache_1"}),
+            ("DC2", "minio_2_ep1", [11, 15], [13, 7.5], 7, 2998, 3610, 53, {"location": "DC5", "sfe": "minio_2_ep1", "server": "DC5", "sfc": "test_sfc", "sfci": "test_sfc_non_premium", "sfp": "minio", "sf": "minio_2"})
         ):
             request = testing.DummyRequest()
             request.matchdict["graph_id"] = request_id
diff --git a/src/service/clmcservice/graphapi/utilities.py b/src/service/clmcservice/graphapi/utilities.py
index 58d1dff2c40c8ec550194d96e887f1ed05d71439..2ac899d30186aa7a8985d1f245fc8b33873c2941 100644
--- a/src/service/clmcservice/graphapi/utilities.py
+++ b/src/service/clmcservice/graphapi/utilities.py
@@ -30,10 +30,10 @@ import logging
 GRAPH_ROUND_TRIP_TIME_URL_PARAMS = ("compute_node", "endpoint")
 
 GRAPH_BUILD_URL_PARAMS = ("from", "to")
-GRAPH_BUILD_QUERY_PARAMS = {"database", "retention_policy", "service_function_chain_instance", "service_functions"}
+GRAPH_BUILD_QUERY_PARAMS = {"service_function_chain", "service_function_chain_instance", "service_functions"}
 GRAPH_BUILD_SF_QUERY_PARAMS = {"response_time_field", "request_size_field", "response_size_field", "measurement_name"}
 
-INFLUX_QUERY_TEMPLATE = 'SELECT {0} AS mean_response_time, {1} AS mean_request_size, {2} AS mean_response_size FROM "{3}"."{4}".{5} WHERE sfc_i=\'{6}\' and time>={7} and time<{8} GROUP BY ipendpoint, location, sf_i, host, sr'
+INFLUX_QUERY_TEMPLATE = 'SELECT {0} AS mean_response_time, {1} AS mean_request_size, {2} AS mean_response_size FROM "{3}"."{4}".{5} WHERE sfc=\'{6}\' and sfci=\'{7}\' and time>={8} and time<{9} GROUP BY sfe, location, sf'
 
 
 RTT_CYPHER_QUERY_TEMPLATE = """
@@ -57,7 +57,9 @@ def validate_json_queries_body(body):
     Validates the request body containing mappings from service functions to queries to execute.
 
     :param body: the request body to validate
+
     :return the validated json queries dictionary object
+
     :raise AssertionError: if the body is invalid
     """
 
@@ -70,15 +72,16 @@ def validate_json_queries_body(body):
 
     assert GRAPH_BUILD_QUERY_PARAMS == set(body.keys()), "Invalid JSON query document."
 
-    sfc_i = body["service_function_chain_instance"]
-    sfc_i_subparts = sfc_i.split('_')
-    assert len(sfc_i_subparts) > 1, "Incorrect format of service function chain instance ID - use format <sfcID>_<instanceNum>"
-
-    # check the last part of the sfc_i ID is a number
-    try:
-        int(sfc_i_subparts[-1])
-    except ValueError:
-        assert False, "Incorrect format of service function chain instance ID - use format <sfcID>_<instanceNum>"
+    # NOTE: this code is now outdated - we no longer have SFC instance ID depending on the SFC ID
+    # sfc_i = body["service_function_chain_instance"]
+    # sfc_i_subparts = sfc_i.split('_')
+    # assert len(sfc_i_subparts) > 1, "Incorrect format of service function chain instance ID - use format <sfcID>_<instanceNum>"
+    #
+    # # check the last part of the sfc_i ID is a number
+    # try:
+    #     int(sfc_i_subparts[-1])
+    # except ValueError:
+    #     assert False, "Incorrect format of service function chain instance ID - use format <sfcID>_<instanceNum>"
 
     assert type(body["service_functions"]) == dict, "The service function description should be represented with a dictionary."
 
@@ -193,22 +196,21 @@ def build_temporal_graph(request_id, from_timestamp, to_timestamp, json_queries,
 
     global INFLUX_QUERY_TEMPLATE
 
-    db = json_queries["database"]
-    rp = json_queries["retention_policy"]
-    sfc_i = json_queries["service_function_chain_instance"]
-
-    log.info("Building graph for service function chain {0} from database {1} with retention policy {2}".format(sfc_i, db, rp))
+    sfc = json_queries["service_function_chain"]
+    sfci = json_queries["service_function_chain_instance"]
+    db = sfc
+    rp = "autogen"
 
-    sfc = "_".join(sfc_i.split('_')[: -1])  # assumes sfc_i is always in the form <sfc>_<num>
+    log.info("Building graph for service function chain {0}/{1} from database {2} with retention policy {3}".format(sfc, sfci, db, rp))
 
     # create a UUID reference node
-    reference_node = Node("Reference", **{"uuid": request_id, "sfc": sfc, "sfc_i": sfc_i, "from": from_timestamp, "to": to_timestamp})
+    reference_node = Node("Reference", **{"uuid": request_id, "sfc": sfc, "sfci": sfci, "from": from_timestamp, "to": to_timestamp})
     graph.create(reference_node)
 
     # create a node for the service function chain if it doesn't exist
     service_function_chain_node = find_or_create_node(graph, "ServiceFunctionChain", name=sfc)
     # create a node for the service function chain instance if it doesn't exist
-    service_function_chain_instance_node = find_or_create_node(graph, "ServiceFunctionChainInstance", name=sfc_i)
+    service_function_chain_instance_node = find_or_create_node(graph, "ServiceFunctionChainInstance", name=sfci)
     # create a instanceOf edge if it doesn't exist
     find_or_create_edge(graph, "instanceOf", service_function_chain_instance_node, service_function_chain_node)
 
@@ -224,7 +226,7 @@ def build_temporal_graph(request_id, from_timestamp, to_timestamp, json_queries,
         measurement = query_data["measurement_name"]
 
         # build up the query by setting the placeholders in the query template
-        query_to_execute = INFLUX_QUERY_TEMPLATE.format(response_time_field, request_size_field, response_size_field, db, rp, measurement, sfc_i, from_timestamp, to_timestamp)
+        query_to_execute = INFLUX_QUERY_TEMPLATE.format(response_time_field, request_size_field, response_size_field, db, rp, measurement, sfc, sfci, from_timestamp, to_timestamp)
 
         # create a node for the service function if it doesn't exist
         service_function_node = find_or_create_node(graph, "ServiceFunction", name=service_function)
@@ -248,14 +250,14 @@ def build_temporal_graph(request_id, from_timestamp, to_timestamp, json_queries,
             response_size = result_point["mean_response_size"]  # extract the avg response size of the SF from the result
 
             # create a ServiceFunctionInstance node from the tag value (if it is not already created)
-            service_function_instance_node = find_or_create_node(graph, "ServiceFunctionInstance", name=tags["sf_i"])
+            service_function_instance_node = find_or_create_node(graph, "ServiceFunctionInstance", name=tags["sf"])
             # create an edge between the instance and the service function (if it is not already created)
             find_or_create_edge(graph, "instanceOf", service_function_instance_node, service_function_node)
             # crate a utilizedBy edge between the service function instance and the service function chain instance
             find_or_create_edge(graph, "utilizedBy", service_function_instance_node, service_function_chain_instance_node)
 
             # create an Endpoint node from the tag value (if it is not already created)
-            ipendpoint_node = find_or_create_node(graph, "Endpoint", name=tags["ipendpoint"], response_time=response_time, request_size=request_size, response_size=response_size, uuid=request_id, host=tags["host"], sr=tags["sr"])
+            ipendpoint_node = find_or_create_node(graph, "Endpoint", name=tags["sfe"], response_time=response_time, request_size=request_size, response_size=response_size, uuid=request_id)
             # create an edge between the instance and the endpoint (if it is not already created)
             find_or_create_edge(graph, "realisedBy", service_function_instance_node, ipendpoint_node)
 
@@ -266,7 +268,7 @@ def build_temporal_graph(request_id, from_timestamp, to_timestamp, json_queries,
 
             compute_nodes.add(compute_node)  # add the compute node to the set of compute nodes
 
-    log.info("Finished building graph for service function chain {0} from database {1} with retention policy {2}".format(sfc_i, db, rp))
+    log.info("Finished building graph for service function chain {0} from database {1} with retention policy {2}".format(sfci, db, rp))
 
 
 def delete_temporal_subgraph(graph, subgraph_id):
diff --git a/src/service/clmcservice/graphapi/views.py b/src/service/clmcservice/graphapi/views.py
index 79ff877a8fa47addc0d06f0369a1165b138ccb9b..710c9afc44f3eda5b8ffab1e33431bcc9e76f2d0 100644
--- a/src/service/clmcservice/graphapi/views.py
+++ b/src/service/clmcservice/graphapi/views.py
@@ -74,13 +74,9 @@ class GraphAPI(object):
         graph = Graph(host=self.request.registry.settings['neo4j_host'], password=self.request.registry.settings['neo4j_password'])
         influx_client = InfluxDBClient(host=self.request.registry.settings['influx_host'], port=self.request.registry.settings['influx_port'], timeout=10)
 
-        database_name = json_queries["database"]
+        database_name = json_queries["service_function_chain"]
         if database_name not in [db["name"] for db in influx_client.get_list_database()]:
-            raise HTTPBadRequest("Database {0} not found.".format(database_name))
-
-        retention_policy = json_queries["retention_policy"]
-        if retention_policy not in [rp["name"] for rp in influx_client.get_list_retention_policies(database_name)]:
-            raise HTTPBadRequest("Retention policy {0} for database {1} not found.".format(retention_policy, database_name))
+            raise HTTPBadRequest("Database for service function chain {0} not found.".format(database_name))
 
         from_timestamp = params['from'] * 10**9
         to_timestamp = params['to'] * 10**9
@@ -156,20 +152,20 @@ class GraphAPI(object):
             data = graph.run(query_to_execute).data()  # returns a list of dictionaries, each dictionary represents a row in the result
             result = data[0]
 
-        sf_i_node = graph.match(nodes=(None, endpoint_node), r_type="realisedBy").first().start_node
-        if sf_i_node is None:
-            msg = "No service function instance found associated with endpoint {0}".format(endpoint_node["name"])
+        sf_node = graph.match(nodes=(None, endpoint_node), r_type="realisedBy").first().start_node
+        if sf_node is None:
+            msg = "No service function found associated with endpoint {0}".format(endpoint_node["name"])
             log.error("Unexpected error: {0}".format(msg))
             raise HTTPBadRequest(msg)
 
-        sf_node = graph.match(nodes=(sf_i_node, None), r_type="instanceOf").first().end_node
-        if sf_node is None:
-            msg = "No service function found associated with service function instance {0}".format(sf_i_node["name"])
+        sf_package_node = graph.match(nodes=(sf_node, None), r_type="instanceOf").first().end_node
+        if sf_package_node is None:
+            msg = "No service function package found associated with service function {0}".format(sf_node["name"])
             log.error("Unexpected error: {0}".format(msg))
             raise HTTPBadRequest(msg)
 
-        result["global_tags"] = {"ipendpoint": endpoint_node["name"], "host": endpoint_node["host"], "location": hosted_by_node["name"], "sr": endpoint_node["sr"],
-                                 "sfc": reference_node["sfc"], "sfc_i": reference_node["sfc_i"], "sf": sf_node["name"], "sf_i": sf_i_node["name"]}
+        result["global_tags"] = {"sfe": endpoint_node["name"], "server": hosted_by_node["name"], "location": hosted_by_node["name"],
+                                 "sfc": reference_node["sfc"], "sfci": reference_node["sfci"], "sfp": sf_package_node["name"], "sf": sf_node["name"]}
 
         # calculate the Round-Trip-Time
         total_forward_latency = sum(result["forward_latencies"])
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-1.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-1.yaml
index 4848c1811d39b75a4cdc5491655b5f365d409f13..ea52ebfba0e4f7754f6eaff856c327edf88d166a 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-1.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-1.yaml
@@ -43,7 +43,7 @@ topology_template:
               threshold: 100
               granularity: 120
               resource_type:
-                sf_package: storage
+                sfp: storage
                 sf: storage-users
                 location: watershed
               comparison_operator: gte
@@ -64,7 +64,7 @@ topology_template:
               granularity: 60
               aggregation_method: last
               resource_type:
-                sf_package: storage
+                sfp: storage
                 sf: storage-users
                 location: watershed
               comparison_operator: lt
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-10.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-10.yaml
index 491d1e7d8c4a295c587d3b12da18bd3c81c8c91e..a3ef03924fdfd3abcf14e9bef9389e2268d5a8be 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-10.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-10.yaml
@@ -43,7 +43,7 @@ topology_template:
               threshold: -100  # requests have decreased by at least 100
               granularity: 120
               resource_type:
-                sf_package: storage
+                sfp: storage
                 sf: storage-users
                 location: watershed
               comparison_operator: lte
@@ -64,7 +64,7 @@ topology_template:
               granularity: 60
               aggregation_method: last
               resource_type:
-                sf_package: storage
+                sfp: storage
                 sf: storage-users
                 location: watershed
               comparison_operator: lt
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-2.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-2.yaml
index afa4d867dfaa26d84c3211d65665cf255b880d97..2ae8d168360aaa61d5d27665595ed9ea4ae4975b 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-2.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-2.yaml
@@ -23,7 +23,7 @@ topology_template:
               threshold: 0
               granularity: 60
               resource_type:
-                sf_package: storage
+                sfp: storage
             action:
               implementation:
                 - http://sfemc.flame.eu/notify
@@ -41,7 +41,7 @@ topology_template:
               granularity: 60
               aggregation_method: last
               resource_type:
-                sf_package: storage
+                sfp: storage
                 sf: storage-users
                 location: watershed
               comparison_operator: less than # invalid comparison operator
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-3.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-3.yaml
index 9d50bca35c8064e68dc84b36450009f04fd8000d..05cde5b0b26195a019184da3bf6614fb432a9bd0 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-3.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-3.yaml
@@ -26,7 +26,7 @@ topology_template:
               granularity: 60
               aggregation_method: last
               resource_type:
-                sf_package_id: storage # sf_package_id is not the correct tag name, it is sf_package
+                sf_package_id: storage # sf_package_id is not the correct tag name, it is sfp
                 sf: storage-users
                 location: watershed
               comparison_operator: lt
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-4.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-4.yaml
index 3f4f7c15000615a4de92e974f7d9ecb32c51b77a..96cc6fa2382059ee8e48483f356c055232609ade 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-4.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-4.yaml
@@ -26,7 +26,7 @@ topology_template:
               granularity: 60
               aggregation_method: average # wrong aggregation method - should be mean, not average
               resource_type:
-                sf_package: storage
+                sfp: storage
                 sf: storage-users
                 location: watershed
               comparison_operator: lt
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-5.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-5.yaml
index ea60be6b9a9e453daaac03bb13412e830301c50b..8ef6f1372232859e518a2d09356362fab9333286 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-5.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-5.yaml
@@ -26,7 +26,7 @@ topology_template:
               granularity: 60
               aggregation_method: mean
               resource_type:
-                sf_package: storage
+                sfp: storage
                 sf: storage-users
                 location: watershed
               comparison_operator: lt
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-6.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-6.yaml
index 57decbdc9dbb32aa884bbeeb249128f7392f66b6..944f14b7055b006e900e208e8f92c019ae2d830c 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-6.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-6.yaml
@@ -25,6 +25,7 @@ topology_template:
               aggregation_method: mean
               resource_type:
                 location: watershed
+                server: watershed
               comparison_operator: gt
             action:
               implementation:
@@ -44,7 +45,7 @@ topology_template:
               granularity: 60
               aggregation_method: last
               resource_type:
-                sf_package: storage
+                sfp: storage
                 sf: storage-users
                 location: watershed
               comparison_operator: lt
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-7.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-7.yaml
index 2285e66035873e2906b9eddc3b49d3ab9462abc6..1c048919c59a560397405ae2991b90de414849e2 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-7.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-7.yaml
@@ -44,7 +44,7 @@ topology_template:
               granularity: 60
               aggregation_method: last
               resource_type:
-                sf_package: storage
+                sfp: storage
                 sf: storage-users
                 location: watershed
               comparison_operator: lt
@@ -63,7 +63,7 @@ topology_template:
               threshold: 0
               granularity: 60
               resource_type:
-                sf_package: storage
+                sfp: storage
             action:
               implementation:
               - http://sfemc.flame.eu/notify
\ No newline at end of file
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-8.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-8.yaml
index bd960b505f7cad18348f2aed4aeab70267607eb6..d108984b55ece5ed26e2437d90f67e9d3c0eabed 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-8.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-8.yaml
@@ -30,7 +30,7 @@ topology_template:
               granularity: 60
               aggregation_method: last
               resource_type:
-                sf_package: storage
+                sfp: storage
                 sf: storage-users
                 location: watershed
               comparison_operator: lt
@@ -51,7 +51,7 @@ topology_template:
               threshold: 100  # requests have increased by at least 100
               granularity: 120
               resource_type:
-                sf_package: storage
+                sfp: storage
                 sf: storage-users
                 location: watershed
               comparison_operator: gte
@@ -68,7 +68,7 @@ topology_template:
               threshold: -100  # requests have decreased by at least 100
               granularity: 120
               resource_type:
-                sf_package: storage
+                sfp: storage
                 sf: storage-users
                 location: watershed
               comparison_operator: lte
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-9.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-9.yaml
index dcb38d11e21057bc37f23956f94ae8d784f9d6fc..b6f981977b6b3ae62e3edde1b5648216d7070b97 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-9.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-9.yaml
@@ -42,7 +42,7 @@ topology_template:
               granularity: 60
               aggregation_method: last
               resource_type:
-                sf_package: storage
+                sfp: storage
                 sf: storage-users
                 location: watershed
               comparison_operator: lt
diff --git a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-1.yaml b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-1.yaml
index b11c672df6f6304385b13713f867a5a791c1c7b8..1d87586297cd5024463c0091f9eb2062eb92001c 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-1.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-1.yaml
@@ -45,7 +45,7 @@ topology_template:
               granularity: 60
               aggregation_method: last
               resource_type:
-                sf_package: storage
+                sfp: storage
                 sf: storage-users
                 location: watershed
               comparison_operator: lt
diff --git a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-2.yaml b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-2.yaml
index 0d2e1e12dc700888f4af8e365b2f67c6afcc9394..6fe0e903cafd74389923598f5618d7bb6b769753 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-2.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-2.yaml
@@ -27,7 +27,7 @@ topology_template:
               resource_type:
                 sfc: companyA-VR  # sfc tag is also allowed, even though it is already included in the metadata
                 sfci: companyA-VR-premium # sfci tag is also allowed, even though it is already included in the metadata
-                sf_package: storage
+                sfp: storage
                 sf: storage-users
                 location: watershed
               comparison_operator: lte
@@ -48,7 +48,7 @@ topology_template:
               threshold: 0  # if requests are less than or equal to 0 (in other words, no measurements are reported)
+                granularity: 60  # check for missing data for the last 60 seconds
               resource_type:
-                sf_package: storage
+                sfp: storage
             action:
               implementation:
                 - http://sfemc.flame.eu/notify
\ No newline at end of file
diff --git a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-3.yaml b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-3.yaml
index 529152c82fdedc61b38250989b748ed499832ad2..d076ed779187aeb0ece7e6e798e5d106b92a6fa1 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-3.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-3.yaml
@@ -42,7 +42,7 @@ topology_template:
               granularity: 60
               aggregation_method: first
               resource_type:
-                sf_package: storage
+                sfp: storage
                 sf: storage-users
                 location: watershed
               comparison_operator: lt
diff --git a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-4.yaml b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-4.yaml
index 8c106043f4b6fee825f060bb41d47d313a82ce91..f6a64217a5306f527e1dc4493d9555dbd6f95606 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-4.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-4.yaml
@@ -25,6 +25,7 @@ topology_template:
               aggregation_method: median
               resource_type:
                 location: watershed
+                server: watershed
               comparison_operator: gt
             action:
               implementation:
@@ -60,7 +61,7 @@ topology_template:
               threshold: 0  # if requests are less than or equal to 0 (in other words, no measurements are reported)
+                granularity: 60  # check for missing data for the last 60 seconds
               resource_type:
-                sf_package: storage
+                sfp: storage
               comparison_operator: gte # although events of type deadman do not use a comparison operator, the validator will not complain if one is given, it will simply ignore it
             action:
               implementation:
diff --git a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-5.yaml b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-5.yaml
index 54567e1ad51673681fa6cdd47dc6c72e3fd959af..e6506877ada6021654d47e1b9c94e012e68be327 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-5.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-5.yaml
@@ -61,7 +61,7 @@ topology_template:
               granularity: 120
               aggregation_method: first  # Although events of type relative do not require an aggregation method, the validator will not complain if one is given, it will simply ignore it
               resource_type:
-                sf_package: storage
+                sfp: storage
                 sf: storage-users
                 location: watershed
               comparison_operator: gte
diff --git a/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-1.yaml b/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-1.yaml
index 92bac7b8d7625a5c59fe3b96f123885755dc9afc..ce07a9f036302a16b230017d83220f7a58b4b186 100644
--- a/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-1.yaml
+++ b/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-1.yaml
@@ -30,7 +30,7 @@ triggers:
         granularity: 60
         aggregation_method: last
         resource_type:
-          sf_package: storage
+          sfp: storage
           sf: storage-users
           location: watershed
         comparison_operator: lt
diff --git a/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-2.yaml b/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-2.yaml
index 4eb465211573b183fc123b02381e6b5c0ed56a24..3b0515936958d6d5d825a4975e9c242d9ac03260 100644
--- a/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-2.yaml
+++ b/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-2.yaml
@@ -42,7 +42,7 @@ topology_template:
               threshold: 100
               granularity: 120
               resource_type:
-                sf_package: storage
+                sfp: storage
                 sf: storage-users
                 location: watershed
               comparison_operator: gte
diff --git a/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-3.yaml b/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-3.yaml
index 60167c25f9c64599c32784eea93e2e2c8d6c162b..e8b1c0d8604220aa7520b482b40aa3d345adfc60 100644
--- a/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-3.yaml
+++ b/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-3.yaml
@@ -24,6 +24,7 @@ topology_template:
             aggregation_method: mean
             resource_type:
               location: watershed
+              server: watershed
             comparison_operator: gt
           action:
             implementation:
@@ -43,7 +44,7 @@ topology_template:
             granularity: 60
             aggregation_method: last
             resource_type:
-              sf_package: storage
+              sfp: storage
               sf: storage-users
               location: watershed
             comparison_operator: lt
diff --git a/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-4.yaml b/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-4.yaml
index 1fdc4b346e41bc00d3576b4cce3305c758a0fef3..23b74df548e21c8c42ec82f1cd13ed2a7c6090ca 100644
--- a/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-4.yaml
+++ b/src/service/resources/tosca/test-data/tosca-parser/invalid/alerts_test_config-4.yaml
@@ -44,7 +44,7 @@ alerts:
               granularity: 60
               aggregation_method: last
               resource_type:
-                sf_package: storage
+                sfp: storage
                 sf: storage-users
                 location: watershed
               comparison_operator: lt
diff --git a/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-1.yaml b/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-1.yaml
index 3c7019a2ba4bdd0e6d3f0f119a31f5dbbbe4fe7e..d70a41697d01b993be13e21d223f24c8b5edb30f 100644
--- a/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-1.yaml
+++ b/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-1.yaml
@@ -25,6 +25,7 @@ topology_template:
               aggregation_method: mean
               resource_type:
                 location: watershed
+                server: watershed
               comparison_operator: gt
             action:
               implementation:
@@ -44,7 +45,7 @@ topology_template:
               granularity: 60
               aggregation_method: last
               resource_type:
-                sf_package: storage
+                sfp: storage
                 sf: storage-users
                 location: watershed
               comparison_operator: lt
diff --git a/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-2.yaml b/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-2.yaml
index 4df849dc46b75eceef0f3bf51fdb2f93bfc4d23e..5de75ef4f5e9c8b60715fbc0575a644a17409b3d 100644
--- a/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-2.yaml
+++ b/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-2.yaml
@@ -56,7 +56,7 @@ topology_template:
               threshold: 0
               granularity: 60
               resource_type:
-                sf_package: storage
+                sfp: storage
             action:
               implementation:
               - http://sfemc.flame.eu/notify
\ No newline at end of file
diff --git a/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-4.yaml b/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-4.yaml
index 7ad8e0e57c8c1ea0d02d6078916f772b20daf2ce..4be8c2f8dce4e2e0169c813a3ca20304749fa606 100644
--- a/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-4.yaml
+++ b/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-4.yaml
@@ -26,7 +26,7 @@ topology_template:
               granularity: 60
               aggregation_method: last
               resource_type:
-                sf_package: storage
+                sfp: storage
                 sf: storage-users
                 location: watershed
               comparison_operator: lt
@@ -43,7 +43,7 @@ topology_template:
               threshold: 100
               granularity: 120
               resource_type:
-                sf_package: storage
+                sfp: storage
                 sf: storage-users
                 location: watershed
               comparison_operator: gte
diff --git a/src/test/clmctest/alerts/alerts_test_config.yaml b/src/test/clmctest/alerts/alerts_test_config.yaml
index 3fd6b04b8c05523e3b411ad8bb41baee4e3f11be..8c469c023b537a7ec20f278b6e1b42942bfa5f39 100644
--- a/src/test/clmctest/alerts/alerts_test_config.yaml
+++ b/src/test/clmctest/alerts/alerts_test_config.yaml
@@ -6,7 +6,7 @@ imports:
 - flame_clmc_alerts_definitions.yaml
 
 metadata:
-  sfc: CLMCMetrics
+  sfc: MS_Template_1
   sfci: MS_I1
 
 topology_template:
@@ -41,7 +41,7 @@ topology_template:
               aggregation_method: mean
               resource_type:
                 location: DC1
-                sf: nginx
+                sfp: nginx
               comparison_operator: gte
             action:
               implementation:
@@ -54,8 +54,11 @@ topology_template:
               threshold: 5
               granularity: 10
               resource_type:
+                sfc: MS_Template_1  # value is already given in metadata so this is optional
+                sfci: MS_I1  # value is already given in metadata so this is optional
+                sfp: nginx
+                sf: adaptive_streaming_nginx_I1
                 location: DC1
-                sf: nginx
               comparison_operator: gte
             action:
               implementation:
@@ -72,8 +75,10 @@ topology_template:
               threshold: 0
               granularity: 5
               resource_type:
-                sf: nginx
-                host: DC1
+                sfc: MS_Template_1  # value is already given in metadata so this is optional
+                sfci: MS_I1  # value is already given in metadata so this is optional
+                sfp: nginx
+                sf: adaptive_streaming_nginx_I1
                 location: DC1
             action:
               implementation:
diff --git a/src/test/clmctest/alerts/resources_test_config.yaml b/src/test/clmctest/alerts/resources_test_config.yaml
index ee1bc956c09b4b009fc8a5efb5ea47ce55f1c54c..45591f166d254eab8840a276a3897d6210f0cf1b 100644
--- a/src/test/clmctest/alerts/resources_test_config.yaml
+++ b/src/test/clmctest/alerts/resources_test_config.yaml
@@ -2,7 +2,7 @@ tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
 
 metadata:
   template_name: Flame CLMC Alerts Integration Test
-  sfc: CLMCMetrics
+  sfc: MS_Template_1
   sfci: MS_I1
 
 
diff --git a/src/test/clmctest/inputs/__init__.py b/src/test/clmctest/inputs/__init__.py
index 44f772595799f5fe338534918c95e23e08e80464..a93a4bf16d8eee8666c6a28c3e40306983804a29 100644
--- a/src/test/clmctest/inputs/__init__.py
+++ b/src/test/clmctest/inputs/__init__.py
@@ -1 +1 @@
-#!/usr/bin/python3
\ No newline at end of file
+#!/usr/bin/python3
diff --git a/src/test/clmctest/inputs/conftest.py b/src/test/clmctest/inputs/conftest.py
index 86218af4ab7734cf0efab8b2d439488769100e28..a4b2639906a719831de9eace1bbad4abe8db9400 100644
--- a/src/test/clmctest/inputs/conftest.py
+++ b/src/test/clmctest/inputs/conftest.py
@@ -19,46 +19,50 @@
 ##
 ##      Created By :            Michael Boniface
 ##      Created Date :          02-03-2018
+##      Updated By :            Nikolay Stanchev
+##      Updated Date :          30-08-2018
 ##      Created for Project :   FLAME
 """
 
 import pytest
 import time
-import yaml
 import json
 import pkg_resources
 from influxdb import InfluxDBClient
 
 
 @pytest.fixture(scope="module")
-def telegraf_agent_config(request):
+def telegraf_agent_config():
     """
     Reads the service configuration deployed for the streaming simulation test.
 
-    :param request: access the parameters of the fixture
     :return: the python object representing the read YAML file
     """
+
     rspec = pkg_resources.resource_filename('clmctest', 'rspec.json')
     print("\nrspec file: {0}".format(rspec))
+
     with open(rspec, 'r') as stream:
         data_loaded = json.load(stream)
+
     return data_loaded
 
 
-@pytest.fixture(params=[{'database': 'CLMCMetrics'}], scope='module')
+@pytest.fixture(params=[{'database': 'MS_Template_1'}], scope='module')
 def influxdb(telegraf_agent_config, request):
     """
     Creates an Influx DB client for the CLMC metrics database with an empty database
 
     :param telegraf_agent_config: the fixture returning the yaml configuration
     :param request: access the parameters of the fixture
+
     :return: the created Influx DB client
     """
    
-    db =  InfluxDBClient(host=telegraf_agent_config[0]['ip_address'], port=8086, database=request.param['database'], timeout=10)
+    db = InfluxDBClient(host=telegraf_agent_config[0]['ip_address'], port=8086, database=request.param['database'], timeout=10)
     db.drop_database(request.param['database'])
 
-    # wait 20 seconds for the 1st measurement to arrive from agents before returning
+    # wait 30 seconds for the 1st measurement to arrive from agents before returning
     time.sleep(30)
 
     return db
diff --git a/src/test/clmctest/inputs/test_rspec.py b/src/test/clmctest/inputs/test_rspec.py
index e56bac91e7335a0fcad399dc41f5ac00ae8c1fd5..ba0e06c9b2037a61f65a86bff819abbf91e6c4b6 100644
--- a/src/test/clmctest/inputs/test_rspec.py
+++ b/src/test/clmctest/inputs/test_rspec.py
@@ -19,6 +19,8 @@
 ##
 ##      Created By :            Michael Boniface
 ##      Created Date :          25-02-2018
+##      Updated By :            Nikolay Stanchev
+##      Updated Date :          30-08-2018
 ##      Created for Project :   FLAME
 """
 
diff --git a/src/test/clmctest/inputs/test_telegraf_agents.py b/src/test/clmctest/inputs/test_telegraf_agents.py
index 88238eaf62b6292561125fb92a4f67228da360f6..d8951d375e6965191983a9be71591cfe8aac9f10 100644
--- a/src/test/clmctest/inputs/test_telegraf_agents.py
+++ b/src/test/clmctest/inputs/test_telegraf_agents.py
@@ -19,55 +19,26 @@
 ##
 ##      Created By :            Michael Boniface
 ##      Created Date :          02-03-2018
+##      Updated By :            Nikolay Stanchev
+##      Updated Date :          30-08-2018
 ##      Created for Project :   FLAME
 """
 
 import pytest
-from subprocess import run
-from platform import system
-from influxdb import InfluxDBClient
-
-@pytest.mark.parametrize("service_name", [
-    ('clmc-service'),
-    ('apache'),
-    ('nginx'),
-    ('mongo'),
-    ('host'),
-    ('minio')
-    ])
-def test_service_name(telegraf_agent_config, service_name):
-    assert any(s['name'] == service_name for s in telegraf_agent_config), "{0} not in list of hosts".format(service_name)
-    
-def test_ping(telegraf_agent_config):
-    """
-    Pings each service to test for liveliness
-
-    :param streaming_sim_config: the configuration fixture collected from conftest.py
-    """
-
-    print("\n")  # blank line printed for formatting purposes
-
-    ping_count = 1
-    system_dependent_param = "-n" if system().lower() == "windows" else "-c"
-
-    for service in telegraf_agent_config:
-        command = ["ping", system_dependent_param, str(ping_count), service['ip_address']]
-        assert run(command).returncode == 0, "Service ping test failed for {0} with ip address {1}".format(service['name'], service['ip_address'])
-        print("\nSuccessfully passed ping test for service: {0}\n".format(service['name']))
 
 
 @pytest.mark.parametrize("measurement, query, expected_result", [
-    ('nginx', 'SELECT mean("requests") AS "mean" FROM "CLMCMetrics"."autogen"."nginx"', 0),
-    ('cpu', 'SELECT mean("usage_idle") AS "mean" FROM "CLMCMetrics"."autogen"."cpu"', 0),
-    ('mongodb', 'SELECT mean("net_in_bytes") AS "mean" FROM "CLMCMetrics"."autogen"."mongodb"', 0),
-    ('net', 'SELECT mean("bytes_sent") AS "mean" FROM "CLMCMetrics"."autogen"."net"', 0),
-    ('disk', 'SELECT mean("free") AS "mean" FROM "CLMCMetrics"."autogen"."disk"', 0),
-    ('mem', 'SELECT mean("free") AS "mean" FROM "CLMCMetrics"."autogen"."mem"', 0),
-    ('service_config_state', 'SELECT mean("loaded.active.running_count") AS "mean" FROM "CLMCMetrics"."autogen"."service_config_state" WHERE "resource"=\'nginx.service\'', 0),
+    ('nginx', 'SELECT mean("requests") AS "mean" FROM "MS_Template_1"."autogen"."nginx"', 0),
+    ('cpu', 'SELECT mean("usage_idle") AS "mean" FROM "MS_Template_1"."autogen"."cpu"', 0),
+    ('mongodb', 'SELECT mean("net_in_bytes") AS "mean" FROM "MS_Template_1"."autogen"."mongodb"', 0),
+    ('net', 'SELECT mean("bytes_sent") AS "mean" FROM "MS_Template_1"."autogen"."net"', 0),
+    ('disk', 'SELECT mean("free") AS "mean" FROM "MS_Template_1"."autogen"."disk"', 0),
+    ('mem', 'SELECT mean("free") AS "mean" FROM "MS_Template_1"."autogen"."mem"', 0),
+    ('service_config_state', 'SELECT mean("loaded.active.running_count") AS "mean" FROM "MS_Template_1"."autogen"."service_config_state" WHERE "resource"=\'nginx.service\'', 0),
     # Report MINIO's HTTP request response time (as a rolling difference of the sum total)
-    ('minio_http_requests_duration_seconds', 'SELECT difference(max("sum")) AS "mean" FROM "CLMCMetrics"."autogen"."minio_http_requests_duration_seconds" WHERE time > now() - 1h GROUP BY time(10s)',0),      
+    ('minio_http_requests_duration_seconds', 'SELECT difference(max("sum")) AS "mean" FROM "MS_Template_1"."autogen"."minio_http_requests_duration_seconds" WHERE time > now() - 1h GROUP BY time(10s)',0),
     # Report the average change in difference of MINIO's HTTP response time (the inner query determines a rolling difference between sampling periods [respTimeDiff])
-    ('minio_http_requests_duration_seconds', 'SELECT mean("respTimeDiff") AS "mean" FROM (SELECT difference(max("sum")) AS "respTimeDiff" FROM "CLMCMetrics"."autogen"."minio_http_requests_duration_seconds" WHERE time > now() - 1h GROUP BY time(10s))',0)              
+    ('minio_http_requests_duration_seconds', 'SELECT mean("respTimeDiff") AS "mean" FROM (SELECT difference(max("sum")) AS "respTimeDiff" FROM "MS_Template_1"."autogen"."minio_http_requests_duration_seconds" WHERE time > now() - 1h GROUP BY time(10s))',0)
     ])
 def test_all_inputs(influxdb, measurement, query, expected_result):
     """
@@ -79,7 +50,7 @@ def test_all_inputs(influxdb, measurement, query, expected_result):
     :param expected_result: the expected result from the query
     """
 
-    query_result = influxdb.query('SHOW measurements ON "CLMCMetrics"')
+    query_result = influxdb.query('SHOW measurements ON "MS_Template_1"')
     points = list(query_result.get_points())
     assert any(p['name'] == measurement for p in points), "{0} not in measurement list".format(measurement)
     
@@ -89,14 +60,25 @@ def test_all_inputs(influxdb, measurement, query, expected_result):
     assert actual_result > expected_result, "actual result {0} is not > expected result {1} for query {2}".format(actual_result, str(expected_result), query)
 
 
-@pytest.mark.parametrize("query, expected_result", 
-    [('filter query', 0),
-     ('filter query', 0),
-     ('filter query', 0)
-    ])
-def test_global_tag_filtering(influxdb, query, expected_result):
-    """Tests that the global tags are inserted correctly into the global configuration using the install CLMC script
+@pytest.mark.parametrize("query", [
+    'SELECT * FROM "MS_Template_1"."autogen"."nginx" GROUP BY *',
+    'SELECT * FROM "MS_Template_1"."autogen"."cpu" GROUP BY *',
+    'SELECT * FROM "MS_Template_1"."autogen"."mongodb" GROUP BY *',
+    'SELECT * FROM "MS_Template_1"."autogen"."net" GROUP BY *',
+    'SELECT * FROM "MS_Template_1"."autogen"."disk" GROUP BY *',
+    'SELECT * FROM "MS_Template_1"."autogen"."mem" GROUP BY *',
+    'SELECT * FROM "MS_Template_1"."autogen"."service_config_state" WHERE "resource"=\'nginx.service\' GROUP BY *',
+    'SELECT * FROM "MS_Template_1"."autogen"."minio_http_requests_duration_seconds" GROUP BY *'
+])
+def test_global_tag_filtering(influxdb, query):
+    """
+    Tests that the global tags are inserted correctly into the global configuration using the CLMC agent install script
+
+    :param influxdb: the influx db client fixture
+    :param query: the query to execute
     """
-    # run query
-    # check result
-    assert 1
+
+    query_result = influxdb.query(query).items()[0]
+    tags = query_result[0][1].keys()
+
+    assert set(tags).issuperset({"sfc", "sfci", "sfp", "sf", "sfe", "server", "location"})
diff --git a/src/test/clmctest/monitoring/E2ESim.py b/src/test/clmctest/monitoring/E2ESim.py
deleted file mode 100644
index 4a3fa2d6d4d9768fed96188d571bc34c22ac0aad..0000000000000000000000000000000000000000
--- a/src/test/clmctest/monitoring/E2ESim.py
+++ /dev/null
@@ -1,159 +0,0 @@
-#!/usr/bin/python3
-"""
-## Copyright University of Southampton IT Innovation Centre, 2018
-##
-## Copyright in this software belongs to University of Southampton
-## IT Innovation Centre of Gamma House, Enterprise Road,
-## Chilworth Science Park, Southampton, SO16 7NS, UK.
-##
-## This software may not be used, sold, licensed, transferred, copied
-## or reproduced in whole or in part in any manner or form or in or
-## on any media by any person other than in accordance with the terms
-## of the Licence Agreement supplied with the software, or otherwise
-## without the prior written consent of the copyright owners.
-##
-## This software is distributed WITHOUT ANY WARRANTY, without even the
-## implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-## PURPOSE, except where stated in the Licence Agreement supplied with
-## the software.
-##
-##      Created By :            Michael Boniface
-##      Created Date :          15-04-2018
-##      Updated By :            Nikolay Stanchev
-##      Updated Date :          16-04-2018
-##      Created for Project :   FLAME
-"""
-
-
-from influxdb import InfluxDBClient
-import clmctest.monitoring.LineProtocolGenerator as lp
-import urllib.parse
-import time
-import random
-
-
-class Simulator(object):
-    """
-    Simulator used to generate E2E measurements.
-    """
-
-    DATABASE = 'CLMCMetrics'  # default database name
-    DATABASE_URL = 'http://172.40.231.51:8086'  # default database url
-
-    TICK = 1  # a simulation tick represents 1s
-    SIMULATION_LENGTH = 120  # simulation time in seconds
-
-    def __init__(self, database_url=DATABASE_URL, database=DATABASE):
-        """
-        Initialises the simulator by creating a db client object and resetting the database.
-
-        :param database_url: db url
-        :param database: db name
-        """
-
-        url_object = urllib.parse.urlparse(database_url)
-        self.db_client = InfluxDBClient(host=url_object.hostname, port=url_object.port, database=database, timeout=10)
-
-        self.db_url = database_url
-        self.db_name = database
-
-        self._reset_db()
-
-    def _reset_db(self):
-        """
-        Reset the database using the already initialised db client object.
-        """
-
-        self.db_client.drop_database(self.db_name)
-        self.db_client.create_database(self.db_name)
-
-    def run(self):
-        """
-        Runs the simulation.
-        """
-
-        # all network delays start from 1ms, the dictionary stores the information to report
-        paths = [
-            {
-                'target': 'SR2',
-                'source': 'SR1',
-                'path_id': 'SR1---SR2',
-                'latency': 5,
-                'bandwidth': 100*1024*1024
-            },
-            {
-                'target': 'SR1',
-                'source': 'SR2',
-                'path_id': 'SR1---SR2',
-                'latency': 5,
-                'bandwidth': 100*1024*1024
-            },
-            {
-                'target': 'SR3',
-                'source': 'SR1',
-                'path_id': 'SR1---SR3',
-                'latency': 5,
-                'bandwidth': 100*1024*1024
-            },
-            {
-                'target': 'SR1',
-                'source': 'SR3',
-                'path_id': 'SR1---SR3',
-                'latency': 5,
-                'bandwidth': 100*1024*1024
-            }
-        ]
-
-        service_function_instances = [
-            {
-                'endpoint': 'ms1.flame.org',
-                'sf_instance': 'sr2.ms1.flame.org',  # TODO: what did we decide the sf_instance would look like?
-                'sfr': 'SR2',
-                'service_delay': 40,
-                'cpus': 1
-            },
-            {
-                'endpoint': 'ms1.flame.org',
-                'sf_instance': 'sr3.ms1.flame.org',  # TODO: what did we decide the sf_instance would look like?
-                'sfr': 'SR3',
-                'service_delay': 10,
-                'cpus': 4
-            }
-        ]
-
-        av_request_size = 10 * 1024 * 1024  # average request size measured by service function / Bytes
-        av_response_size = 1 * 1024  # average request size measured by service function / Bytes
-
-        # current time in seconds (to test the aggregation we write influx data points related to future time), so we start from the current time
-        start_time = int(time.time())
-
-        sim_time = start_time
-
-        sample_period_net = 1  # sample period for reporting network delays (measured in seconds)
-        sample_period_media = 5  # sample period for reporting media service delays (measured in seconds)
-
-        for i in range(0, self.SIMULATION_LENGTH):
-            # report one of the network delays every sample_period_net seconds
-            if i % sample_period_net == 0:
-                path = random.choice(paths)
-                self.db_client.write_points(
-                    lp.generate_network_delay_report(path['path_id'], path['source'], path['target'], path['latency'], path['bandwidth'], sim_time))
-
-                # increase/decrease the delay in every sample report (min delay is 1)
-                path['latency'] = max(1, path['latency'] + random.randint(-3, 3))
-
-            # report one of the service_function_instance response times every sample_period_media seconds
-            if i % sample_period_media == 0:
-                service = random.choice(service_function_instances)
-                self.db_client.write_points(lp.generate_service_delay_report(
-                    service['endpoint'], service['sf_instance'], service['sfr'], service['service_delay'], av_request_size, av_response_size, sim_time))
-
-            # increase the time by one simulation tick
-            sim_time += self.TICK
-
-        end_time = sim_time
-        print("Simulation finished. Start time: {0}, End time: {1}".format(start_time, end_time))
-
-
-if __name__ == "__main__":
-    Simulator().run()
diff --git a/src/test/clmctest/monitoring/LineProtocolGenerator.py b/src/test/clmctest/monitoring/LineProtocolGenerator.py
index 20b62120f9ceb9e0bfc44f318025380caebcfb68..3c9c93816dfcdb63b4b76de143d1c49cdf6fec9a 100644
--- a/src/test/clmctest/monitoring/LineProtocolGenerator.py
+++ b/src/test/clmctest/monitoring/LineProtocolGenerator.py
@@ -29,66 +29,6 @@ import uuid
 from random import randint
 
 
-def generate_network_delay_report(path_id, source_sfr, target_sfr, latency, bandwidth, time):
-    """
-    Generates a platform measurement about the network delay between two specific service routers.
-
-    :param path_id: the identifier of the path between the two service routers
-    :param source_sfr: the source service router
-    :param target_sfr: the target service router
-    :param latency: the e2e network delay for traversing the path between the two service routers
-    :param bandwidth: the bandwidth of the path (minimum of bandwidths of the links it is composed of)
-    :param time: the measurement timestamp
-    :return: a list of dict-formatted reports to post on influx
-    """
-
-    result = [{"measurement": "network_delays",
-               "tags": {
-                   "path": path_id,
-                   "source": source_sfr,
-                   "target": target_sfr
-               },
-               "fields": {
-                   "latency": latency,
-                   "bandwidth": bandwidth
-               },
-               "time": _getNSTime(time)
-               }]
-
-    return result
-
-
-def generate_service_delay_report(endpoint, sf_instance, sfr, response_time, request_size, response_size, time):
-    """
-    Generates a service measurement about the media service response time.
-
-    :param endpoint: endpoint of the media component
-    :param sf_instance: service function instance
-    :param sfr: the service function router that connects the endpoint of the SF instance to the FLAME network
-    :param response_time: the media service response time (this is not the response time for the whole round-trip, but only for the processing part of the media service component)
-    :param request_size: the size of the request received by the service in Bytes
-    :param response_size: the size of the response received by the service in Bytes
-    :param time: the measurement timestamp
-    :return: a list of dict-formatted reports to post on influx
-    """
-
-    result = [{"measurement": "service_delays",
-               "tags": {
-                   "endpoint": endpoint,
-                   "sf_instance": sf_instance,
-                   "sfr": sfr
-               },
-               "fields": {
-                   "response_time": response_time,
-                   "request_size": request_size,
-                   "response_size": response_size
-               },
-               "time": _getNSTime(time)
-               }]
-
-    return result
-
-
 # Reports TX and RX, scaling on requested quality
 def generate_network_report(recieved_bytes, sent_bytes, time):
     result = [{"measurement": "net_port_io",
diff --git a/src/test/clmctest/monitoring/__init__.py b/src/test/clmctest/monitoring/__init__.py
index 44f772595799f5fe338534918c95e23e08e80464..a93a4bf16d8eee8666c6a28c3e40306983804a29 100644
--- a/src/test/clmctest/monitoring/__init__.py
+++ b/src/test/clmctest/monitoring/__init__.py
@@ -1 +1 @@
-#!/usr/bin/python3
\ No newline at end of file
+#!/usr/bin/python3
diff --git a/src/test/clmctest/monitoring/conftest.py b/src/test/clmctest/monitoring/conftest.py
index 34457d87e0eaeeac92d0be57d914d77e63d15fb5..ef8cf2b77752191db4b753dca151215ff071cc1e 100644
--- a/src/test/clmctest/monitoring/conftest.py
+++ b/src/test/clmctest/monitoring/conftest.py
@@ -19,6 +19,8 @@
 ##
 ##      Created By :            Michael Boniface
 ##      Created Date :          25-02-2018
+##      Updated By :            Nikolay Stanchev
+##      Updated Date :          30-08-2018
 ##      Created for Project :   FLAME
 """
 
@@ -27,7 +29,6 @@ import json
 import pkg_resources
 from influxdb import InfluxDBClient
 from clmctest.monitoring.StreamingSim import Sim
-from clmctest.monitoring.E2ESim import Simulator
 
 
 @pytest.fixture(scope="module")
@@ -46,7 +47,7 @@ def streaming_sim_config():
     return data_loaded
 
 
-@pytest.fixture(params=[{'database': 'CLMCMetrics'}], scope='module')
+@pytest.fixture(params=[{'database': 'media_service_A'}], scope='module')
 def influx_db(streaming_sim_config, request):
     """
     Creates an Influx DB client for the CLMC metrics database
@@ -70,29 +71,22 @@ def simulator(streaming_sim_config):
 
     influx_url = "http://" + streaming_sim_config[0]['ip_address'] + ":8086"
 
+    agent1_url, agent2_url = None, None
     for service in streaming_sim_config:
         if service['name'] == "ipendpoint1":
-            influx_db_name = service['database_name']
+            influx_db_name = service['sfc_id']
             agent1_url = "http://" + service['ip_address'] + ":8186"
         elif service['name'] == "ipendpoint2":
             agent2_url = "http://" + service['ip_address'] + ":8186"
 
+        if agent1_url is not None and agent2_url is not None:
+            break
+
+    assert agent1_url is not None, "Configuration error for ipendpoint1"
+    assert agent2_url is not None, "Configuration error for ipendpoint2"
+
     simulator = Sim(influx_url, influx_db_name, agent1_url, agent2_url)
 
     simulator.reset()
 
     return simulator
-
-
-@pytest.fixture(scope="module")
-def e2e_simulator(streaming_sim_config):
-    """
-    A fixture to obtain a simulator instance with the configuration parameters.
-
-    :param streaming_sim_config: the configuration object
-    :return: an instance of the E2E simulator
-    """
-
-    influx_url = "http://" + streaming_sim_config[0]['ip_address'] + ":8086"
-
-    return Simulator(database_url=influx_url)
diff --git a/src/test/clmctest/monitoring/test_e2eresults.py b/src/test/clmctest/monitoring/test_e2eresults.py
deleted file mode 100644
index 9c957d684c677c39539c22570eb328a155a1af16..0000000000000000000000000000000000000000
--- a/src/test/clmctest/monitoring/test_e2eresults.py
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/python3
-"""
-## © University of Southampton IT Innovation Centre, 2018
-##
-## Copyright in this software belongs to University of Southampton
-## IT Innovation Centre of Gamma House, Enterprise Road,
-## Chilworth Science Park, Southampton, SO16 7NS, UK.
-##
-## This software may not be used, sold, licensed, transferred, copied
-## or reproduced in whole or in part in any manner or form or in or
-## on any media by any person other than in accordance with the terms
-## of the Licence Agreement supplied with the software, or otherwise
-## without the prior written consent of the copyright owners.
-##
-## This software is distributed WITHOUT ANY WARRANTY, without even the
-## implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-## PURPOSE, except where stated in the Licence Agreement supplied with
-## the software.
-##
-##      Created By :            Nikolay Stanchev
-##      Created Date :          17-04-2018
-##      Created for Project :   FLAME
-"""
-
-import pytest
-import time
-import requests
-import urllib.parse
-
-
-class TestE2ESimulation(object):
-    """
-    A testing class used to group all the tests related to the E2E simulation data
-    """
-
-    @pytest.fixture(scope='class', autouse=True)
-    def run_simulator(self, e2e_simulator):
-        """
-        A fixture, which runs the simulation before running the tests.
-
-        :param e2e_simulator: the simulator for the end-to-end data
-        """
-
-        # Configure the aggregator through the CLMC service
-        influx_url = urllib.parse.urlparse(e2e_simulator.db_url)
-        aggregator_control_url = "http://{0}:9080/aggregator/control".format(influx_url.hostname)
-        aggregator_config_url = "http://{0}:9080/aggregator/config".format(influx_url.hostname)
-
-        print("Configuring aggregator with request to {0} ...".format(aggregator_config_url))
-        r = requests.put(aggregator_config_url, json={"aggregator_report_period": 5, "aggregator_database_name": e2e_simulator.db_name, "aggregator_database_url": e2e_simulator.db_url})
-        assert r.status_code == 200
-
-        print("Running simulation, please wait...")
-        e2e_simulator.run()
-
-        print("Starting aggregator with request to {0}...".format(aggregator_control_url))
-        r = requests.put(aggregator_control_url, json={"action": "start"})  # start the aggregator through the CLMC service
-        assert r.status_code == 200
-
-        print("Waiting for INFLUX to finish receiving data...")
-        time.sleep(e2e_simulator.SIMULATION_LENGTH)  # wait for data to finish arriving at the INFLUX database
-        print("... simulation data fixture finished")
-
-        print("... stopping aggregator with request to {0}...".format(aggregator_control_url))
-        r = requests.put(aggregator_control_url, json={"action": "stop"})  # stop the aggregator through the CLMC service
-        assert r.status_code == 200
-
-    @pytest.mark.parametrize("query, expected_result, equal_comparison", [
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."network_delays"',
-         {"time": "1970-01-01T00:00:00Z", "count_latency": 120, "count_bandwidth": 120}, True),
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."service_delays"',
-         {"time": "1970-01-01T00:00:00Z", "count_response_time": 24, "count_request_size": 24, "count_response_size": 24}, True),
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."e2e_delays"',
-         {"time": "1970-01-01T00:00:00Z", "count_delay_forward": 40, "count_delay_reverse": 40, "count_delay_service": 40,
-          "count_avg_request_size": 40, "count_avg_response_size": 40, "count_avg_bandwidth": 40}, False),
-        ])
-    def test_simulation(self, influx_db, query, expected_result, equal_comparison):
-        """
-        This is the entry point of the test. This method will be found and executed when the module is ran using pytest
-
-        :param query: the query to execute (value obtained from the pytest parameter decorator)
-        :param expected_result: the result expected from executing the query (value obtained from the pytest parameter decorator)
-        :param influx_db the import db client fixture - imported from contest.py
-        """
-
-        # pytest automatically goes through all queries under test, declared in the parameters decorator
-        print("\n")  # prints a blank line for formatting purposes
-
-        # the raise_errors=False argument is given so that we could actually test that the DB didn't return any errors instead of raising an exception
-        query_result = influx_db.query(query, raise_errors=False)
-
-        # test the error attribute of the result is None, that is no error is returned from executing the DB query
-        assert query_result.error is None, "An error was encountered while executing query {0}.".format(query)
-
-        # get the dictionary of result points; the next() function just gets the first element of the query results generator (we only expect one item in the generator)
-        actual_result = next(query_result.get_points())
-
-        # check if we want to compare for equality or for '>='
-        if equal_comparison:
-            assert expected_result == actual_result, "E2E Simulation test failure"
-        else:
-            for key in expected_result:
-                assert actual_result[key] >= expected_result[key], "E2E Simulation test failure"
diff --git a/src/test/clmctest/monitoring/test_rspec.py b/src/test/clmctest/monitoring/test_rspec.py
index 999b98c4e8ee6c57505e1f2059f194e33b3a19a7..315ebe562b7718b5d587471d472903e6d65597b2 100644
--- a/src/test/clmctest/monitoring/test_rspec.py
+++ b/src/test/clmctest/monitoring/test_rspec.py
@@ -19,6 +19,8 @@
 ##
 ##      Created By :            Michael Boniface
 ##      Created Date :          25-02-2018
+##      Updated By :            Nikolay Stanchev
+##      Updated Date :          30-08-2018
 ##      Created for Project :   FLAME
 """
 
@@ -56,7 +58,10 @@ def test_ping(streaming_sim_config):
     ping_count = 1
     system_dependent_param = "-n" if system().lower() == "windows" else "-c"
 
+    services = {'clmc-service', 'ipendpoint1', 'ipendpoint2'}
+
     for service in streaming_sim_config:
-        command = ["ping", system_dependent_param, str(ping_count), service['ip_address']]
-        assert run(command).returncode == 0, "Service ping test failed for {0} with ip address {1}".format(service['name'], service['ip_address'])
-        print("\nSuccessfully passed ping test for service: {0}\n".format(service['name']))
+        if service["name"] in services:  # test only the scenario specific services
+            command = ["ping", system_dependent_param, str(ping_count), service['ip_address']]
+            assert run(command).returncode == 0, "Service ping test failed for {0} with ip address {1}".format(service['name'], service['ip_address'])
+            print("\nSuccessfully passed ping test for service: {0}\n".format(service['name']))
diff --git a/src/test/clmctest/monitoring/test_simresults.py b/src/test/clmctest/monitoring/test_simresults.py
index 9d8670e7957e417554916f697e4e20af8fcf04be..9bbacc41f21fde261b9f4d1c16a2070767f3d58e 100644
--- a/src/test/clmctest/monitoring/test_simresults.py
+++ b/src/test/clmctest/monitoring/test_simresults.py
@@ -44,71 +44,71 @@ class TestSimulation(object):
         print( "... simulation data fixture finished" )
 
     @pytest.mark.parametrize("query, expected_result", [
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."cpu_usage"',
+        ('SELECT count(*) FROM "media_service_A"."autogen"."cpu_usage"',
          {"time": "1970-01-01T00:00:00Z", "count_cpu_active_time": 7200, "count_cpu_idle_time": 7200, "count_cpu_usage": 7200}),
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."ipendpoint_route"',
+        ('SELECT count(*) FROM "media_service_A"."autogen"."ipendpoint_route"',
          {"time": "1970-01-01T00:00:00Z", "count_http_requests_fqdn_m": 7200, "count_network_fqdn_latency": 7200}),
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."mpegdash_service"',
+        ('SELECT count(*) FROM "media_service_A"."autogen"."mpegdash_service"',
          {"time": "1970-01-01T00:00:00Z", "count_avg_response_time": 7200, "count_peak_response_time": 7200, "count_requests": 7200}),
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."net_port_io"',
+        ('SELECT count(*) FROM "media_service_A"."autogen"."net_port_io"',
          {"time": "1970-01-01T00:00:00Z", "count_RX_BYTES_PORT_M": 7200, "count_TX_BYTES_PORT_M": 7200}),
 
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
+        ('SELECT count(*) FROM "media_service_A"."autogen"."endpoint_config" WHERE sfe=\'endpoint1.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "count_current_state_time": 3639, "count_unplaced_sum": 3639, "count_unplaced_mst": 3639, "count_placing_sum": 3639, "count_placing_mst": 3639, "count_placed_sum": 3639, "count_placed_mst": 3639, "count_booting_sum": 3639, "count_booting_mst": 3639, "count_booted_sum": 3639,
           "count_booted_mst": 3639, "count_connecting_sum": 3639, "count_connecting_mst": 3639, "count_connected_sum": 3639, "count_connected_mst": 3639, "count_cpus": 3639, "count_memory": 3639, "count_storage": 3639}),
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
+        ('SELECT count(*) FROM "media_service_A"."autogen"."endpoint_config" WHERE sfe=\'endpoint2.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "count_current_state_time": 3639, "count_unplaced_sum": 3639, "count_unplaced_mst": 3639, "count_placing_sum": 3639, "count_placing_mst": 3639, "count_placed_sum": 3639, "count_placed_mst": 3639, "count_booting_sum": 3639, "count_booting_mst": 3639, "count_booted_sum": 3639,
           "count_booted_mst": 3639, "count_connecting_sum": 3639, "count_connecting_mst": 3639, "count_connected_sum": 3639, "count_connected_mst": 3639, "count_cpus": 3639, "count_memory": 3639, "count_storage": 3639}),
 
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
+        ('SELECT count(*) FROM "media_service_A"."autogen"."mpegdash_mc_config" WHERE sfe=\'endpoint1.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "count_current_state_time": 3607, "count_running_mst": 3607, "count_running_sum": 3607, "count_starting_mst": 3607, "count_starting_sum": 3607, "count_stopped_mst": 3607, "count_stopped_sum": 3607, "count_stopping_mst": 3607, "count_stopping_sum": 3607}),
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
+        ('SELECT count(*) FROM "media_service_A"."autogen"."mpegdash_mc_config" WHERE sfe=\'endpoint2.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "count_current_state_time": 3607, "count_running_mst": 3607, "count_running_sum": 3607, "count_starting_mst": 3607, "count_starting_sum": 3607, "count_stopped_mst": 3607, "count_stopped_sum": 3607, "count_stopping_mst": 3607, "count_stopping_sum": 3607}),
 
-        ('SELECT mean(unplaced_mst) as "unplaced_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE unplaced_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
+        ('SELECT mean(unplaced_mst) as "unplaced_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE unplaced_mst <> 0 and sfe=\'endpoint1.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "unplaced_mst": 0.7}),
-        ('SELECT mean(placing_mst) as "placing_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE placing_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
+        ('SELECT mean(placing_mst) as "placing_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE placing_mst <> 0 and sfe=\'endpoint1.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "placing_mst": 9.4}),
-        ('SELECT mean(placed_mst) as "placed_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE placed_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
+        ('SELECT mean(placed_mst) as "placed_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE placed_mst <> 0 and sfe=\'endpoint1.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "placed_mst": 1.7000000000000002}),
-        ('SELECT mean(booting_mst) as "booting_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE booting_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
+        ('SELECT mean(booting_mst) as "booting_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE booting_mst <> 0 and sfe=\'endpoint1.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "booting_mst": 9.6}),
-        ('SELECT mean(booted_mst) as "booted_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE booted_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
+        ('SELECT mean(booted_mst) as "booted_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE booted_mst <> 0 and sfe=\'endpoint1.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "booted_mst": 2.1}),
-        ('SELECT mean(connecting_mst) as "connecting_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE connecting_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
+        ('SELECT mean(connecting_mst) as "connecting_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE connecting_mst <> 0 and sfe=\'endpoint1.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "connecting_mst":  10.2}),
-        ('SELECT mean(connected_mst) as "connected_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE connected_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
+        ('SELECT mean(connected_mst) as "connected_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE connected_mst <> 0 and sfe=\'endpoint1.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "connected_mst": 3605.0}),
-        ('SELECT mean(unplaced_mst) as "unplaced_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE unplaced_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
+        ('SELECT mean(unplaced_mst) as "unplaced_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE unplaced_mst <> 0 and sfe=\'endpoint2.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "unplaced_mst": 0.7}),
-        ('SELECT mean(placing_mst) as "placing_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE placing_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
+        ('SELECT mean(placing_mst) as "placing_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE placing_mst <> 0 and sfe=\'endpoint2.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "placing_mst": 9.4}),
-        ('SELECT mean(placed_mst) as "placed_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE placed_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
+        ('SELECT mean(placed_mst) as "placed_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE placed_mst <> 0 and sfe=\'endpoint2.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "placed_mst": 1.7000000000000002}),
-        ('SELECT mean(booting_mst) as "booting_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE booting_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
+        ('SELECT mean(booting_mst) as "booting_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE booting_mst <> 0 and sfe=\'endpoint2.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "booting_mst": 9.6}),
-        ('SELECT mean(booted_mst) as "booted_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE booted_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
+        ('SELECT mean(booted_mst) as "booted_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE booted_mst <> 0 and sfe=\'endpoint2.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "booted_mst": 2.1}),
-        ('SELECT mean(connecting_mst) as "connecting_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE connecting_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
+        ('SELECT mean(connecting_mst) as "connecting_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE connecting_mst <> 0 and sfe=\'endpoint2.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "connecting_mst":  10.2}),
-        ('SELECT mean(connected_mst) as "connected_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE connected_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
+        ('SELECT mean(connected_mst) as "connected_mst" FROM "media_service_A"."autogen"."endpoint_config" WHERE connected_mst <> 0 and sfe=\'endpoint2.ms-A.ict-flame.eu\'',
          {"time": "1970-01-01T00:00:00Z", "connected_mst": 3605.0}),
 
-        ('SELECT mean(stopped_sum) as "stopped_sum" FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE stopped_sum <> 0',
+        ('SELECT mean(stopped_sum) as "stopped_sum" FROM "media_service_A"."autogen"."mpegdash_mc_config" WHERE stopped_sum <> 0',
          {"time": "1970-01-01T00:00:00Z", "stopped_sum": 0.2}),
-        ('SELECT mean(stopped_mst) as "stopped_mst" FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE stopped_mst <> 0',
+        ('SELECT mean(stopped_mst) as "stopped_mst" FROM "media_service_A"."autogen"."mpegdash_mc_config" WHERE stopped_mst <> 0',
          {"time": "1970-01-01T00:00:00Z", "stopped_mst": 0.2}),
-        ('SELECT mean(starting_sum) as "starting_sum" FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE starting_sum <> 0',
+        ('SELECT mean(starting_sum) as "starting_sum" FROM "media_service_A"."autogen"."mpegdash_mc_config" WHERE starting_sum <> 0',
          {"time": "1970-01-01T00:00:00Z", "starting_sum": 5.5}),
-        ('SELECT mean(starting_mst) as "starting_mst" FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE starting_mst <> 0',
+        ('SELECT mean(starting_mst) as "starting_mst" FROM "media_service_A"."autogen"."mpegdash_mc_config" WHERE starting_mst <> 0',
          {"time": "1970-01-01T00:00:00Z", "starting_mst": 5.5}),
-        ('SELECT mean(running_sum) as "running_sum" FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE running_sum <> 0',
+        ('SELECT mean(running_sum) as "running_sum" FROM "media_service_A"."autogen"."mpegdash_mc_config" WHERE running_sum <> 0',
          {"time": "1970-01-01T00:00:00Z", "running_sum": 3602.1000000000004}),
-        ('SELECT mean(running_mst) as "running_mst" FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE running_mst <> 0',
+        ('SELECT mean(running_mst) as "running_mst" FROM "media_service_A"."autogen"."mpegdash_mc_config" WHERE running_mst <> 0',
          {"time": "1970-01-01T00:00:00Z", "running_mst": 3602.1000000000004}),
-        ('SELECT mean(stopping_sum) as "stopping_sum" FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE stopping_sum <> 0',
+        ('SELECT mean(stopping_sum) as "stopping_sum" FROM "media_service_A"."autogen"."mpegdash_mc_config" WHERE stopping_sum <> 0',
          {"time": "1970-01-01T00:00:00Z", "stopping_sum": 1.1}),
-        ('SELECT mean(stopping_mst) as "stopping_mst" FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE stopping_mst <> 0',
+        ('SELECT mean(stopping_mst) as "stopping_mst" FROM "media_service_A"."autogen"."mpegdash_mc_config" WHERE stopping_mst <> 0',
          {"time": "1970-01-01T00:00:00Z", "stopping_mst": 1.1}),
     ])
     def test_simulation(self, influx_db, query, expected_result):
diff --git a/src/test/clmctest/rspec.json b/src/test/clmctest/rspec.json
index 97d18cd6cb29c5eb1d267c6308ef0efabafe8e62..524d25bd91ad1fb0ba6634b9ab4bb1dc69602824 100644
--- a/src/test/clmctest/rspec.json
+++ b/src/test/clmctest/rspec.json
@@ -1,9 +1,6 @@
 [{
     "name": "clmc-service",
     "ip_address": "172.40.231.51",
-    "influxdb_url": "http://172.40.231.51:8086",
-    "database_name": "CLMCMetrics",
-    "report_period": "25",  
     "forward_ports": [
         { "guest": 8086, "host": 8086 },
         { "guest": 8888, "host": 8888 },
@@ -16,26 +13,22 @@
     "ip_address": "172.40.231.150",
     "location": "DC1",
     "sfc_id": "MS_Template_1",
-    "sfc_id_instance": "MS_I1",
-    "sf_id": "apache",
-    "sf_id_instance": "adaptive_streaming_I1",
-    "ipendpoint_id": "adaptive_streaming_I1_apache1",
-    "sr_id": "service_router",
-    "influxdb_url": "http://172.40.231.51:8086",
-    "database_name": "CLMCMetrics"
+    "sfc_instance_id": "MS_I1",
+    "sf_package_id": "apache",
+    "sf_id": "adaptive_streaming_I1",
+    "sf_endpoint_id": "adaptive_streaming_I1_apache1",
+    "influxdb_url": "http://172.40.231.51:8086"
 },
 {
     "name": "nginx",
     "ip_address": "172.40.231.151",
     "location": "DC1",
     "sfc_id": "MS_Template_1",
-    "sfc_id_instance": "MS_I1",
-    "sf_id": "nginx",
-    "sf_id_instance": "adaptive_streaming_nginx_I1",
-    "ipendpoint_id": "adaptive_streaming_nginx_I1_apache1",
-    "sr_id": "service_router",
-    "influxdb_url": "http://172.40.231.51:8086",
-    "database_name": "CLMCMetrics"
+    "sfc_instance_id": "MS_I1",
+    "sf_package_id": "nginx",
+    "sf_id": "adaptive_streaming_nginx_I1",
+    "sf_endpoint_id": "adaptive_streaming_nginx_I1_apache1",
+    "influxdb_url": "http://172.40.231.51:8086"
 },
 {
     "name": "mongo",
@@ -43,13 +36,11 @@
     "ip_address": "172.40.231.152",
     "location": "DC1",
     "sfc_id": "MS_Template_1",
-    "sfc_id_instance": "MS_I1",
-    "sf_id": "mongo",
-    "sf_id_instance": "metadata_database_I1",
-    "ipendpoint_id": "metadata_database_I1_apache1",
-    "sr_id": "service_router",
-    "influxdb_url": "http://172.40.231.51:8086",
-    "database_name": "CLMCMetrics"
+    "sfc_instance_id": "MS_I1",
+    "sf_package_id": "mongo",
+    "sf_id": "metadata_database_I1",
+    "sf_endpoint_id": "metadata_database_I1_apache1",
+    "influxdb_url": "http://172.40.231.51:8086"
 },
 {
     "name": "host",    
@@ -57,52 +48,44 @@
     "ip_address": "172.40.231.154",
     "location": "DC1",
     "sfc_id": "MS_Template_1",
-    "sfc_id_instance": "MS_I1",
-    "sf_id": "host",
-    "sf_id_instance": "adaptive_streaming_I1",
-    "ipendpoint_id": "adaptive_streaming_I1_apache1",
-    "sr_id": "service_router",
-    "influxdb_url": "http://172.40.231.51:8086",
-    "database_name": "CLMCMetrics"
+    "sfc_instance_id": "MS_I1",
+    "sf_package_id": "host",
+    "sf_id": "adaptive_streaming_I1",
+    "sf_endpoint_id": "adaptive_streaming_I1_apache1",
+    "influxdb_url": "http://172.40.231.51:8086"
 },
 {
     "name": "minio",    
     "ip_address": "172.40.231.155",
     "location": "DC1",
     "sfc_id": "MS_Template_1",
-    "sfc_id_instance": "MS_I1",
-    "sf_id": "minio",
-    "sf_id_instance": "adaptive_streaming_I1",
-    "ipendpoint_id": "adaptive_streaming_I1_minio",
-    "sr_id": "service_router",
-    "influxdb_url": "http://172.40.231.51:8086",
-    "database_name": "CLMCMetrics"
+    "sfc_instance_id": "MS_I1",
+    "sf_package_id": "minio",
+    "sf_id": "adaptive_streaming_I1",
+    "sf_endpoint_id": "adaptive_streaming_I1_minio",
+    "influxdb_url": "http://172.40.231.51:8086"
 },
 {
     "name": "ipendpoint1",
     "ip_address": "172.40.231.170",
     "location": "nova",
     "sfc_id": "media_service_A",
-    "sfc_id_instance": "StackID",
-    "sf_id": "ipendpoint",
-    "sf_id_instance": "ms-A.ict-flame.eu",
-    "ipendpoint_id": "endpoint1.ms-A.ict-flame.eu",
-    "sr_id": "service_router",
-    "influxdb_url": "http://172.40.231.51:8086",
-    "database_name": "CLMCMetrics"
+    "sfc_instance_id": "StackID",
+    "sf_package_id": "ipendpoint",
+    "sf_id": "ms-A.ict-flame.eu",
+    "sf_endpoint_id": "endpoint1.ms-A.ict-flame.eu",
+    "influxdb_url": "http://172.40.231.51:8086"
 },
 {
     "name": "ipendpoint2", 
     "ip_address": "172.40.231.171",
     "location": "nova",
     "sfc_id": "media_service_A",
-    "sfc_id_instance": "StackID",
-    "sf_id": "ipendpoint",
-    "sf_id_instance": "ms-A.ict-flame.eu",
-    "ipendpoint_id": "endpoint2.ms-A.ict-flame.eu",
-    "sr_id": "service_router",
-    "influxdb_url": "http://172.40.231.51:8086",
-    "database_name": "CLMCMetrics"
+    "sfc_instance_id": "StackID",
+    "sf_package_id": "ipendpoint",
+    "sf_id": "ms-A.ict-flame.eu",
+    "sf_endpoint_id": "endpoint2.ms-A.ict-flame.eu",
+    "influxdb_url": "http://172.40.231.51:8086"
 },
 {
     "name": "test-runner",
diff --git a/src/test/clmctest/scripts/__init__.py b/src/test/clmctest/scripts/__init__.py
index 44f772595799f5fe338534918c95e23e08e80464..a93a4bf16d8eee8666c6a28c3e40306983804a29 100644
--- a/src/test/clmctest/scripts/__init__.py
+++ b/src/test/clmctest/scripts/__init__.py
@@ -1 +1 @@
-#!/usr/bin/python3
\ No newline at end of file
+#!/usr/bin/python3
diff --git a/src/test/clmctest/scripts/test_config_telegraf.py b/src/test/clmctest/scripts/test_config_telegraf.py
index acd8866fc62f7fac51f8c6bf0f844b032d98c6f3..ca23772481d256e5b3974319c649ee3becbe1240 100644
--- a/src/test/clmctest/scripts/test_config_telegraf.py
+++ b/src/test/clmctest/scripts/test_config_telegraf.py
@@ -19,37 +19,38 @@
 ##
 ##      Created By :            Michael Boniface
 ##      Created Date :          20-03-2018
+##      Updated By :            Nikolay Stanchev
+##      Updated Date :          30-08-2018
 ##      Created for Project :   FLAME
 """
 
-import pytest
 import subprocess
 
+
 def test_write_telegraf_conf():
+    # test telegraf monitoring configuration
+    TELEGRAF_CONF_DIR = "/etc/telegraf"
+    LOCATION = "DC1"
+    SFC_ID = "media_service_A"
+    SFC_INSTANCE_ID = "media_service_A_instance"
+    SF_PACKAGE_ID = "streaming_service"
+    SF_ID = "streaming_service_instance"
+    SF_ENDPOINT_ID = "endpoint"
+    INFLUXDB_URL = "http://172.29.236.10"
+    DATABASE_NAME = SFC_ID
 
-  # test telegraf monitoring configuration
-  TELEGRAF_CONF_DIR="/etc/telegraf"
-  LOCATION="DC1"
-  SFC_ID="media_service_A"
-  SFC_ID_INSTANCE="media_service_A_instance"
-  SF_ID="streaming_service"
-  SF_ID_INSTANCE="streaming_service_instance"
-  IP_ENDPOINT_ID="endpoint"
-  SR_ID="service_router"  
-  INFLUXDB_URL="http://172.29.236.10"
-  DATABASE_NAME="experimentation_database"  
-
-  try:
     # mk telegraf conf directory
+    cmd = 'sudo mkdir -p /etc/telegraf'
+    (out, err, code) = run_command(cmd)
+    assert code == 0, "Failed to create telegraf conf dir : " + str(code) + ", cmd=" + cmd
 
-    (out, err, code) = run_command('sudo mkdir -p /etc/telegraf')
-    assert code == 0, "Failed to create telegraf conf dir : " + str(code) + ", cmd=" + cmd    
-
-    (out, err, code) = run_command('sudo mkdir -p /etc/telegraf/telegraf.d')
-    assert code == 0, "Failed to create telegraf include dir : " + str(code) + ", cmd=" + cmd  
+    cmd = 'sudo mkdir -p /etc/telegraf/telegraf.d'
+    (out, err, code) = run_command(cmd)
+    assert code == 0, "Failed to create telegraf include dir : " + str(code) + ", cmd=" + cmd
 
-    # run write config template  script with no telegraf.d directory
-    (out, err, code) = run_command('sudo cp /vagrant/scripts/clmc-agent/telegraf.conf /etc/telegraf/')
+    # run write config template script with no telegraf.d directory
+    cmd = 'sudo cp /vagrant/scripts/clmc-agent/telegraf.conf /etc/telegraf/'
+    (out, err, code) = run_command(cmd)
     assert code == 0, "Failed to copy telegraf.conf : " + str(code) + ", cmd=" + cmd
 
     cmd = 'sudo cp /vagrant/scripts/clmc-agent/telegraf_output.conf /etc/telegraf/telegraf.d/'
@@ -57,56 +58,50 @@ def test_write_telegraf_conf():
     assert code == 0, "Failed to copy telegraf_output.conf : " + str(code) + ", cmd=" + cmd
 
     # run template relacement script with incorrect arguments
-    cmd = 'sudo /vagrant/scripts/clmc-agent/configure.sh' 
+    cmd = 'sudo /vagrant/scripts/clmc-agent/configure.sh'
     (out, err, code) = run_command(cmd)
-    assert code == 1, "Failed to return error on incorrect arguments : " + str(code) + ", cmd=" + cmd  
+    assert code == 1, "Failed to return error on incorrect arguments : " + str(code) + ", cmd=" + cmd
 
     # run template relacement script with all arguments
-    cmd = 'sudo /vagrant/scripts/clmc-agent/configure.sh ' + LOCATION + ' ' + SFC_ID + ' ' + SFC_ID_INSTANCE + ' ' + SF_ID + ' ' + SF_ID_INSTANCE + ' ' + IP_ENDPOINT_ID + ' ' + SR_ID + ' ' + INFLUXDB_URL + ' ' + DATABASE_NAME
+    cmd = 'sudo /vagrant/scripts/clmc-agent/configure.sh ' + LOCATION + ' ' + SFC_ID + ' ' + SFC_INSTANCE_ID + ' ' + SF_PACKAGE_ID + ' ' + SF_ID + ' ' + SF_ENDPOINT_ID + ' ' + INFLUXDB_URL
     (out, err, code) = run_command(cmd)
     assert code == 0, "Configure command returned error, output=" + str(out) + ", cmd=" + cmd
 
     # check that replacement was correct in telegraf.conf
-    try:        
-        TELEGRAF_GENERAL_CONF_FILE = TELEGRAF_CONF_DIR + "/telegraf.conf"
+    TELEGRAF_GENERAL_CONF_FILE = TELEGRAF_CONF_DIR + "/telegraf.conf"
+    try:
         with open(TELEGRAF_GENERAL_CONF_FILE) as general_conf:
-          lines = general_conf.read()          
-          assert lines.find(LOCATION), "Cannot find location" 
-          assert lines.find(SFC_ID), "Cannot find sfc_id"
-          assert lines.find(SFC_ID_INSTANCE), "Cannot find sfc_id_instance"  
-          assert lines.find(SF_ID), "Cannot find sfc_id"            
-          assert lines.find(SF_ID_INSTANCE), "Cannot find sf_id_instance"
-          assert lines.find(IP_ENDPOINT_ID), "Cannot find endpoint"      
-          assert lines.find(SR_ID), "Cannot find sr_id"                                
+            lines = general_conf.read()
+            assert lines.find(LOCATION), "Cannot find location"
+            assert lines.find(SFC_ID), "Cannot find sfc_id"
+            assert lines.find(SFC_INSTANCE_ID), "Cannot find sfc_instance_id"
+            assert lines.find(SF_PACKAGE_ID), "Cannot find sf_package_id"
+            assert lines.find(SF_ID), "Cannot find sf_id"
+            assert lines.find(SF_ENDPOINT_ID), "Cannot find sf_endpoint_id"
     except FileNotFoundError:
         assert False, "Telegraf general conf file not found, " + TELEGRAF_GENERAL_CONF_FILE
 
     # check that replacement was correct in telegraf_output.conf
+    TELEGRAF_OUTPUT_CONF_FILE = TELEGRAF_CONF_DIR + "/telegraf.d/telegraf_output.conf"
     try:
-        TELEGRAF_OUTPUT_CONF_FILE = TELEGRAF_CONF_DIR + "/telegraf.d/telegraf_output.conf"
         with open(TELEGRAF_OUTPUT_CONF_FILE) as output_conf:
-          lines = output_conf.read()
-          assert lines.find(INFLUXDB_URL), "Cannot find influx_db" 
-          assert lines.find(DATABASE_NAME), "Cannot find database"                    
+            lines = output_conf.read()
+            assert lines.find(INFLUXDB_URL), "Cannot find influx_db url"
+            assert lines.find(DATABASE_NAME), "Cannot find database"
     except FileNotFoundError:
         assert False, "Telegraf output conf file not found, " + TELEGRAF_OUTPUT_CONF_FILE
 
-  finally:
-      # clean up telegraf after test
-#      run_command("sudo rm -rf /etc/telegraf")
-       print("final")
-# wrapper for executing commands on the cli, returning (std_out, std_error, process_return_code)
+
 def run_command(cmd):
-    """Run a shell command.
+    """
+    Run a shell command. Wrapper for executing commands on the cli, returning (std_out, std_error, process_return_code)
 
-    Arguments:
-        cmd {string} -- command to run in the shell
+    :param cmd: {string} -- command to run in the shell
 
-    Returns:
-        stdout, stderr, exit code -- tuple of the process's stdout, stderr and exit code (0 on success)
+    :return: stdout, stderr, exit code -- tuple of the process's stdout, stderr and exit code (0 on success)
     """
+
     proc = subprocess.Popen([cmd], stdout=subprocess.PIPE, shell=True)
     out, err = proc.communicate()
     return_code = proc.returncode
     return out, err, return_code
-