diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index b511596b470f4126c6a30ee6fb0e22b170e4d25d..3f7949f802935ec34c2a5cde9897f20841f075c3 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,7 +1,7 @@
 ## © University of Southampton IT Innovation Centre, 2018
 ##
 ## Copyright in this software belongs to University of Southampton
-## IT Innovation Centre of Gamma House, Enterprise Road, 
+## IT Innovation Centre of Gamma House, Enterprise Road,
 ## Chilworth Science Park, Southampton, SO16 7NS, UK.
 ##
 ## This software may not be used, sold, licensed, transferred, copied
@@ -18,7 +18,7 @@
 ##      Created By :            Michael Boniface
 ##      Created Date :          21-03-2018
 ##      Created for Project :   FLAME
-stages:  
+stages:
   - build
   - test
   - clean
@@ -26,13 +26,13 @@ stages:
 build:tests:
   stage: build
   only:
-    - schedules    
-  script: 
+    - schedules
+  script:
     - echo $CI_PROJECT_DIR
     - cd $CI_PROJECT_DIR/src/test
     - python setup.py sdist --dist-dir=$CI_PROJECT_DIR/build
     - cd $CI_PROJECT_DIR/src/service
-    - python setup.py sdist --dist-dir=$CI_PROJECT_DIR/build  
+    - python setup.py sdist --dist-dir=$CI_PROJECT_DIR/build
   artifacts:
     paths:
     - build/clmctest-SNAPSHOT.tar.gz
@@ -42,11 +42,11 @@ build:tests:
 test:all:
   stage: test
   only:
-    - schedules    
-  dependencies: 
+    - schedules
+  dependencies:
     - build:tests
   script:
-    - echo "REPO_USER=${REPO_USER}" > $CI_PROJECT_DIR/reporc 
+    - echo "REPO_USER=${REPO_USER}" > $CI_PROJECT_DIR/reporc
     - echo "REPO_PASS=${REPO_PASS}" >> $CI_PROJECT_DIR/reporc
     - sudo scripts/test/fixture.sh create -f src/test/clmctest/rspec.json -r $CI_PROJECT_DIR
     - sudo mkdir /var/lib/lxd/containers/test-runner/rootfs/vagrant/build
diff --git a/README.md b/README.md
index 4af8cc0837380d6beaa46e4300a194f8aec020b2..b6db183816274575708cafea052c477837b27869 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
 // © University of Southampton IT Innovation Centre, 2017
 //
 // Copyright in this software belongs to University of Southampton
-// IT Innovation Centre of Gamma House, Enterprise Road, 
+// IT Innovation Centre of Gamma House, Enterprise Road,
 // Chilworth Science Park, Southampton, SO16 7NS, UK.
 //
 // This software may not be used, sold, licensed, transferred, copied
@@ -28,7 +28,7 @@
 
 #### Authors
 
-|Authors|Organisation|                    
+|Authors|Organisation|
 |-|-|
 |[Michael Boniface](mailto:mjb@it-innovation.soton.ac.uk)|[University of Southampton, IT Innovation Centre](http://www.it-innovation.soton.ac.uk)|
 |[Simon Crowle](mailto:sgc@it-innovation.soton.ac.uk)|[University of Southampton, IT Innovation Centre](http://www.it-innovation.soton.ac.uk)|
@@ -37,7 +37,7 @@
 
 #### Documentation
 
-Implementation documentation and discussion can be found in the docs directory. 
+Implementation documentation and discussion can be found in the docs directory.
 
 #### Testing
 
@@ -46,69 +46,76 @@ Testing is implemented using pytest using the following convention:
 * The testing environment is Vagrant/Virtualbox for a base VM with LXC is installed for specific containers.
 * Tests are written in python using pytest
 * Related tests are stored in a python module `src/test/clmctest/<testmodule>` to create a suite of tests. All tests are stored in files test_*.py, there can be many tests per file, and many files per module
-* Tests are executed against a set of LXC containers described in `src/test/clmctest/rspec.json`. 
+* Tests are executed against a set of LXC containers described in `src/test/clmctest/rspec.json`.
 * Tests are executed from the test-runner container on the VM using install python modules
 
-Here's the instructions
+Create a single VM with LXC installed and lxcbr0 configured for the network 172.40.231.0/24:
 
-`vagrant up`
-
-This will create a single VM with LXC installed and configured with lxcbr0 configured for the network 172.40.231.0/24
+```shell
+vagrant up
+```
 
-SSH into the VM
+SSH into the VM:
 
-`vagrant ssh`
+```shell
+vagrant ssh
+```
 
-The containers are controlled using a script called /vagrant/scripts/test/fixtures.sh
+The containers are controlled using the script `/vagrant/scripts/test/fixture.sh`:
 
-```
-Usage: fixture.sh create|start|stop|destroy [-f config_file] [-r repo_root] [-c service_name]"
+```shell
+Usage: fixture.sh create|start|stop|destroy [-f config_file] [-r repo_root] [-c container_name|all]
 ```
 
-To create all the services needed for integration tests
+The containers created are defined in an `rspec.json` file; there's an example at `/vagrant/src/test/clmctest/rspec.json`. The `fixture.sh` script defaults to looking for an `rspec.json` file in the current directory; a specific file can be given with the `-f` option.
 
-```
-sudo su
-/vagrant/scripts/test/fixture.sh create -f /vagrant/src/test/clmctest/rspec.json
+To create|start|stop|destroy a specific service, use the `-c` option, e.g.
+
+```shell
+/vagrant/scripts/test/fixture.sh create -f /vagrant/src/test/clmctest/rspec.json -c clmc-service
 ```
 
-The containers created are defined an rspec.json file, there's an example here `/vagrant/src/test/clmctest/rspec.json`
+The installation of several of the services depends on accessing the Nexus binary repository (for the custom Telegraf agent). To do this, a username and password for the repository must be specified in a `reporc` file in the user's home directory, e.g.
 
-The `fixtures.sh` script defaults to look for a rspec.json in the current directory, you can specify a specific rspec.json file using the -f option
+```shell
+REPO_USER=itinnov.flame.integration
+REPO_PASS=xxxxxxx
+```
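+
+For example, the file could be created from the shell (a sketch; substitute the real credentials):
+
+```shell
+echo "REPO_USER=itinnov.flame.integration" > ~/reporc
+echo "REPO_PASS=xxxxxxx" >> ~/reporc
+```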
 
-To create|start|stop|destroy specific services use the -c option e.g.
+Create all the services needed for integration tests:
 
-```
-/vagrant/scripts/test/fixture.sh create -f /vagrant/src/test/clmctest/rspec.json -c clmc-service
+```shell
+sudo su
+/vagrant/scripts/test/fixture.sh create -f /vagrant/src/test/clmctest/rspec.json -c all
 ```
 
+As part of the clmc-service installation, the service's unit tests are run. The fixture script will fail if any individual service fails to install (or fails its tests).
+
 Attach to the test-runner to run the tests
 
-`lxc-attach -n test-runner`
+```shell
+lxc-attach -n test-runner
+```
 
-Here we need to define a make file but for now the commands are manual
+Build and install the CLMC test Python module:
 
-```
+```shell
 cd /vagrant/src/test
 python setup.py sdist --dist-dir=../../build
-cd ../../src/service
-python setup.py sdist --dist-dir=../../build
 pip3 install /vagrant/build/clmctest-SNAPSHOT.tar.gz
-pip3 install /vagrant/build/clmcservice-SNAPSHOT.tar.gz   
 ```
 
-The following modules are unit tests
+The following module contains unit tests:
 
-```
+```shell
 pytest -s --pyargs clmctest.scripts
-pytest -s --pyargs clmcservice.tests  
 ```
 
-The following modules are integration tests
+The following modules contain integration tests:
 
-```
+```shell
 pytest -s --pyargs clmctest.inputs
-pytest -s --pyargs clmctest.monitoring  
+pytest -s --pyargs clmctest.monitoring
 ```
 
 #### CI Testing
@@ -147,4 +154,4 @@ cp /tmp/install-git-runner.sh /var/lib/lxd/containers/clmc-ci/rootfs/tmp
 chmod 755 /var/lib/lxd/containers/clmc-ci/rootfs/tmp
 ```
 
-Note that the branch must be "protected" for the secret repo rc variables to be passed to the CI script
\ No newline at end of file
+Note that the branch must be "protected" for the secret repo rc variables to be passed to the CI script.
diff --git a/docs/clmc-service.md b/docs/clmc-service.md
index c8ff0204e6fa2d13a59d6a6a9f30a8c4019bd2ff..ed81c97dce20a74be753db24c2967cc5435dc241 100644
--- a/docs/clmc-service.md
+++ b/docs/clmc-service.md
@@ -2,7 +2,7 @@
 // © University of Southampton IT Innovation Centre, 2018
 //
 // Copyright in this software belongs to University of Southampton
-// IT Innovation Centre of Gamma House, Enterprise Road, 
+// IT Innovation Centre of Gamma House, Enterprise Road,
 // Chilworth Science Park, Southampton, SO16 7NS, UK.
 //
 // This software may not be used, sold, licensed, transferred, copied
@@ -25,29 +25,225 @@
 
 #### **Authors**
 
-|Authors|Organisation|                    
-|---|---|  
+|Authors|Organisation|
+|---|---|
 |[Nikolay Stanchev](mailto:ns17@it-innovation.soton.ac.uk)|[University of Southampton, IT Innovation Centre](http://www.it-innovation.soton.ac.uk)|
 
 #### Description
 
 This document describes the CLMC service and its API endpoints. The CLMC service is implemented in the *Python* framework called **Pyramid**.
-It offers different API endpoints to configure and control the aggregator, which is an essential part in the process of measuring the end-to-end performance.
-All source code, tests and configuration files of the service can be found in the **src/clmc-webservice** folder.
+It offers API endpoints to configure and control the aggregator, a CRUD API for service function endpoint configuration data, and a Graph API for calculating round-trip time. All source code, tests and configuration files of the service can be found in the **src/service** folder.
 
-#### API Endpoints
+
+## Graph API Endpoints
+
+* **Assumptions**
+    * For each service function, there is a field/fields from which the service function response time (service delay) can be derived.
+    * For each service function, there is a field/fields from which an average estimation of the size of a **request** to this service function can be derived.
+    * For each service function, there is a field/fields from which an average estimation of the size of a **response** from this service function can be derived.
+    * All the aforementioned fields reside in a single measurement.
+    * There is at most one service function hosted on a particular endpoint.
+
+* **POST** ***/graph/temporal?from={timestamp-seconds}&to={timestamp-seconds}***
+
+    This API method builds a temporal graph for the time range declared with the *from* and *to* URL parameters.
+
+   * Request:
+
+        Expects a JSON-formatted request body which declares the database, retention policy and service function chain instance for which the graph is built.
+        The request should also include the service functions that must be included in the graph along with the measurement name, response time field, request size field and
+        response size field for each service function. The declared fields could be influx functions across multiple fields.
+
+   * Request Body Example:
+
+        ```json
+        {
+          "database": "MSDemo",
+          "retention_policy": "autogen",
+          "service_function_chain_instance": "MSDemo_1",
+          "service_functions": {
+            "nginx": {
+              "response_time_field": "mean(response_time)",
+              "request_size_field": "mean(request_size)",
+              "response_size_field": "mean(response_size)",
+              "measurement_name": "nginx"
+            },
+            "minio": {
+              "response_time_field": "mean(sum)/mean(count)",
+              "request_size_field": "mean(request_size)/mean(count)",
+              "response_size_field": "mean(response_size)/mean(count)",
+              "measurement_name": "minio_http_requests_duration_seconds"
+            }
+          }
+        }
+        ```
+
+        These parameters are then filled in the following influx query template:
+
+        ```
+        SELECT {0} AS mean_response_time, {1} AS mean_request_size, {2} AS mean_response_size FROM "{3}"."{4}".{5} WHERE sfc_i='{6}' and time>={7} and time<{8} GROUP BY ipendpoint, location, sf_i
+        ```
+
+        E.g. for the minio service function, the following query will be used to retrieve the data from influx (request URL is /graph/temporal?from=1528385420&to=1528385860):
+
+        ```
+        SELECT mean(sum)/mean(count) AS mean_response_time, mean(request_size)/mean(count) AS mean_request_size, mean(response_size)/mean(count) AS mean_response_size FROM "MSDemo"."autogen".minio_http_requests_duration_seconds WHERE sfc_i='MSDemo_1' and time>=1528385420000000000 and time<1528385860000000000 GROUP BY ipendpoint, location, sf_i
+        ```
+
+        N.B. timestamps are converted to nanoseconds.
+
+   * Response:
+
+        The response is a JSON document which contains all request parameters used to build the graph, along with a request UUID. This UUID can then be used to manage the temporal subgraph that was created
+        in response to this request.
+
+        Returns a 400 Bad Request error if the request body is invalid.
+
+        Returns a 400 Bad Request error if the request URL parameters are invalid or missing.
+
+        Returns a 400 Bad Request error if the service function chain instance ID is not in the format `<sfcID>_<numberID>`.
+
+   * Response Body Example:
+
+        ```json
+        {
+          "database": "MSDemo",
+          "retention_policy": "autogen",
+          "service_function_chain_instance": "MSDemo_1",
+          "service_functions": {
+            "nginx": {
+              "response_time_field": "mean(response_time)",
+              "request_size_field": "mean(request_size)",
+              "response_size_field": "mean(response_size)",
+              "measurement_name": "nginx"
+            },
+            "minio": {
+              "response_time_field": "mean(sum)/mean(count)",
+              "request_size_field": "mean(request_size)/mean(count)",
+              "response_size_field": "mean(response_size)/mean(count)",
+              "measurement_name": "minio_http_requests_duration_seconds"
+            }
+          },
+          "graph": {
+             "uuid": "75df6f8d-3829-4fd8-a3e6-b3e917010141",
+             "time_range": {
+               "from": 1528385420,
+               "to": 1528385860
+             }
+          }
+        }
+        ```
+
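+   * Example (hypothetical):
+
+        Assuming the service is reachable on localhost:9080 (as in the development setup) and the request body above is saved to `body.json`, the graph could be built with curl:
+
+        ```shell
+        curl -X POST -H "Content-Type: application/json" -d @body.json "http://localhost:9080/graph/temporal?from=1528385420&to=1528385860"
+        ```
+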
+* **DELETE** ***/graph/temporal/{graph_id}***
+
+    This API method deletes the temporal graph associated with a given request UUID (retrieved from the response of a build-graph request).
+    The request UUID must be given in the request URL, e.g. a request sent to */graph/temporal/75df6f8d-3829-4fd8-a3e6-b3e917010141*
+
+    * Response:
+
+        The response is a JSON document which contains the request UUID and the number of deleted nodes.
+
+        Returns a 404 Not Found error if the request UUID is not associated with any nodes in the graph.
+
+   * Response Body Example:
+
+        ```json
+        {
+           "uuid": "75df6f8d-3829-4fd8-a3e6-b3e917010141",
+           "deleted": 5
+        }
+        ```
+
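+   * Example (hypothetical):
+
+        Assuming the same host and port, the temporal graph could be deleted with curl:
+
+        ```shell
+        curl -X DELETE "http://localhost:9080/graph/temporal/75df6f8d-3829-4fd8-a3e6-b3e917010141"
+        ```
+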
+* **GET** ***/graph/temporal/{graph_id}/round-trip-time?compute_node={compute_node_id}&endpoint={endpoint_id}***
+
+    This API method runs the Cypher Round-Trip-Time query over the temporal graph associated with a given request UUID (retrieved from the response of a build-graph request).
+    The request UUID must be given in the request URL, e.g. a request sent to */graph/temporal/75df6f8d-3829-4fd8-a3e6-b3e917010141/round-trip-time?compute_node=DC2&endpoint=minio_1_ep1*
+
+    * Response:
+
+        The response is a JSON document which contains the result of the Cypher query, including forward latencies, reverse latencies and service function response time, along with the
+        calculated round-trip time and the global tag values for the given service function endpoint.
+
+        Returns a 400 Bad Request error if the URL parameters are invalid.
+
+        Returns a 404 Not Found error if the request UUID and the endpoint ID are not associated with an endpoint node in the graph.
+
+        Returns a 404 Not Found error if the compute node ID is not associated with a compute node in the graph.
+
+   * Response Body Example:
+
+        ```json
+        {
+            "request_size": 2048,
+            "response_size": 104857,
+            "bandwidth": 104857600,
+            "forward_latencies": [
+               22, 11
+            ],
+            "total_forward_latency": 33,
+            "reverse_latencies": [
+               15, 18
+            ],
+            "total_reverse_latency": 33,
+            "response_time": 15.75,
+            "round_trip_time": 81.75,
+            "global_tags": {
+                "sr": "SR1",
+                "ipendpoint": "minio_1_ep1",
+                "sfc": "MSDemo",
+                "sf_i": "minio_1",
+                "location": "DC1",
+                "sf": "minio",
+                "sfc_i": "MSDemo_1",
+                "host": "host2"
+            }
+        }
+        ```
+
+        Here, the *forward_latencies* and *reverse_latencies* lists represent the latency experienced at each hop between compute nodes. For example, if the path was DC2-DC3-DC4 and the SF endpoint was hosted
+        on DC4, the response data shows that latency(DC2-DC3) = 22, latency(DC3-DC4) = 11, latency(DC4-DC3) = 15, latency(DC3-DC2) = 18, response_time(minio_1_ep1) = 15.75
+
+        N.B. if the endpoint is hosted on the compute node identified in the URL parameter, then there will be no network hops between compute nodes, so the latency lists will be empty, for example:
+
+        ```json
+        {
+            "request_size": 2048,
+            "response_size": 104857,
+            "bandwidth": 104857600,
+            "forward_latencies": [],
+            "total_forward_latency": 0,
+            "reverse_latencies": [],
+            "total_reverse_latency": 0,
+            "response_time": 3,
+            "round_trip_time": 3,
+            "global_tags": {
+                "sr": "SR1",
+                "ipendpoint": "minio_1_ep1",
+                "sfc": "MSDemo",
+                "sf_i": "minio_1",
+                "location": "DC1",
+                "sf": "minio",
+                "sfc_i": "MSDemo_1",
+                "host": "host2"
+            }
+        }
+        ```
+
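+   * Example (hypothetical):
+
+        Assuming the same host and port, the round-trip-time query could be run with curl:
+
+        ```shell
+        curl "http://localhost:9080/graph/temporal/75df6f8d-3829-4fd8-a3e6-b3e917010141/round-trip-time?compute_node=DC2&endpoint=minio_1_ep1"
+        ```
+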
+## Aggregator API Endpoints
+
+**Note: this API is deprecated. The graph API should be used to compute RTT instead.**
 
 * **GET** ***/aggregator/config***
 
     This API method retrieves information about the configuration of the aggregator.
-    
+
     * Response:
-        
+
         Returns a JSON-formatted response with the configuration data of the aggregator - *aggregator_report_period*, *aggregator_database_name*,
         *aggregator_database_url*.
-        
+
     * Response Body Example:
-        
+
         ```json
         {
           "aggregator_report_period": 5,
@@ -59,14 +255,14 @@ All source code, tests and configuration files of the service can be found in th
 * **PUT** ***/aggregator/config***
 
     This API method updates the configuration of the aggregator.
-    
+
     * Request:
-        
+
         Expects a JSON-formatted request body with the new configuration of the aggregator. The body should contain only
         three key fields - *aggregator_report_period* (positive integer, seconds), *aggregator_database_name* and *aggregator_database_url* (a valid URL).
-        
-    * Request Body Example: 
-    
+
+    * Request Body Example:
+
         ```json
         {
           "aggregator_report_period": 25,
@@ -74,14 +270,14 @@ All source code, tests and configuration files of the service can be found in th
           "aggregator_database_url": "http://172.50.231.61:8086"
         }
         ```
-    
+
     * Response:
-        
-        The body of the request is first validated before updating the configuration. If validation is successful, returns 
+
+        The body of the request is first validated before updating the configuration. If validation is successful, returns
         a JSON-formatted response with the new configuration data. Otherwise, an **HTTP Bad Request** response is returned.
-        
+
     * Response Body Example:
-    
+
         ```json
         {
           "aggregator_report_period": 25,
@@ -89,15 +285,15 @@ All source code, tests and configuration files of the service can be found in th
           "aggregator_database_url": "http://172.50.231.61:8086"
         }
         ```
-    
+
     * Notes:
-    
+
         If the configuration is updated, while the aggregator is running, it is not automatically restarted. An explicit API call
         must be made with a *restart* request to apply the updated configuration. In the case of such PUT request as the one described
         above, the response will contain more information indicating that the configuration of the aggregator is in a malformed state.
-        
+
         * Response Body Example:
-        
+
             ```json
             {
               "aggregator_report_period": 125,
@@ -111,22 +307,22 @@ All source code, tests and configuration files of the service can be found in th
 * **GET** ***/aggregator/control***
 
     This API method retrieves information about the status of the aggregator - whether it is running or not.
-    
+
     * Response:
-        
+
         Returns a JSON-formatted response with the status data of the aggregator - *aggregator_running* field. If the aggregator
         is running in a malformed state, the response will also indicate this with two additional fields - *malformed* and *comment*.
-        
+
     * Response Body Example:
-        
+
         ```json
         {
           "aggregator_running": true
         }
         ```
-        
+
     * Response Body Example - for malformed configuration:
-    
+
         ```json
         {
           "aggregator_running": true,
@@ -138,97 +334,289 @@ All source code, tests and configuration files of the service can be found in th
 * **PUT** ***/aggregator/control***
 
     This API method updates the status of the aggregator - a user can start, stop or restart it.
-    
+
     * Request:
-        
+
         Expects a JSON-formatted request body with the new status of the aggregator. The body should contain only one key
         field - *action* (the action to undertake, which can be **start**, **restart** or **stop**)
-        
-    * Request Body Example: 
-    
+
+    * Request Body Example:
+
         ```json
         {
           "action": "start"
         }
         ```
-    
+
     * Response:
-        
+
         The body of the request is first validated before taking any actions. If the action is not one of the listed above,
         then the validation will fail. If validation is successful, returns a JSON-formatted response with the new status of
         the aggregator. Otherwise, an **HTTP Bad Request** response is returned.
-        
+
     * Response Body Example:
-    
+
         ```json
         {
           "aggregator_running": true
         }
         ```
-        
+
     * Notes:
-    
+
         * If a **start** action is requested, while the aggregator is running, then the request will be ignored. To restart the
         aggregator, a user should use a **restart** action.
-        
+
         * If a **stop** action is requested, while the aggregator is not running, then the request will be ignored.
-        
+
         * A request with a **restart** action, while the aggregator is not running, has the same functionality as a request
         with a **start** action.
-        
+
         * The functionality of a request with a **restart** action is the same as the functionality of a **stop** action
         followed by a **start** action.
-        
-#### Installing and running the CLMC service (development mode)
+
+## CRUD API for service function endpoint configurations
+
+**Note: this API is experimental and is not intended to be used**
+
+* **GET** ***/whoami/endpoints***
+
+    This API method retrieves all service function endpoint configurations in a JSON format.
+
+    * Response:
+
+        Returns a JSON-formatted response - a list of JSON objects, each object representing a service function endpoint configuration.
+
+    * Response Body Example:
+
+        - No service function endpoint configurations found.
+        ```json
+        []
+        ```
+
+        - Multiple service function endpoint configurations found.
+        ```json
+        [
+          {
+            "location": "location_1",
+            "sfc": "sfc_1",
+            "sfc_i": "sfc_i_1",
+            "sf": "sf_1",
+            "sf_i": "sf_i_1",
+            "sf_endpoint": "sf_endpoint_1",
+            "sr": "sr_1"
+          },
+          {
+            "location": "location_2",
+            "sfc": "sfc_2",
+            "sfc_i": "sfc_i_2",
+            "sf": "sf_2",
+            "sf_i": "sf_i_2",
+            "sf_endpoint": "sf_endpoint_2",
+            "sr": "sr_2"
+          }
+        ]
+        ```
+
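+    * Example (hypothetical):
+
+        Assuming the service is reachable on localhost:9080 (an assumption from the development setup), the configurations could be listed with curl:
+
+        ```shell
+        curl "http://localhost:9080/whoami/endpoints"
+        ```
+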
+* **GET** ***/whoami/endpoints/instance?sr={sr_id}&sf_i={sf_instance_id}&sf_endpoint={sf_endpoint_id}***
+
+    This API method retrieves the uniquely defined service function endpoint configuration associated with the given URL parameters - sr, sf_i and sf_endpoint.
+
+    * Response:
+
+        Returns a JSON-formatted response - a JSON object representing the service function endpoint configuration if it exists.
+
+        Returns a 404 Not Found error if there is no service function endpoint configuration associated with the given URL parameters.
+
+        Returns a 400 Bad Request error if the URL parameters are invalid.
+
+    * Response Body Example:
+
+        - Request made to /whoami/endpoints/instance?sr=sr_1&sf_i=sf_i_1&sf_endpoint=sf_endpoint_1
+        ```json
+        {
+          "location": "location_1",
+          "sfc": "sfc_1",
+          "sfc_i": "sfc_i_1",
+          "sf": "sf_1",
+          "sf_i": "sf_i_1",
+          "sf_endpoint": "sf_endpoint_1",
+          "sr": "sr_1"
+        }
+        ```
+
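+    * Example (hypothetical):
+
+        The same request expressed with curl (host and port are assumptions):
+
+        ```shell
+        curl "http://localhost:9080/whoami/endpoints/instance?sr=sr_1&sf_i=sf_i_1&sf_endpoint=sf_endpoint_1"
+        ```
+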
+* **POST** ***/whoami/endpoints***
+
+    This API method creates a new service function endpoint configuration.
+
+    * Request:
+
+        Expects a JSON-formatted request body with the new service function endpoint configuration.
+
+    * Request Body Example:
+
+        ```json
+        {
+          "location": "location_1",
+          "sfc": "sfc_1",
+          "sfc_i": "sfc_i_1",
+          "sf": "sf_1",
+          "sf_i": "sf_i_1",
+          "sf_endpoint": "sf_endpoint_1",
+          "sr": "sr_1"
+        }
+        ```
+
+    * Response
+
+        Returns a JSON-formatted response - a JSON object representing the service function endpoint configuration that was created.
+
+        Returns a 400 Bad Request error if the request body is invalid.
+
+        Returns a 409 Conflict error if there exists another service function endpoint configuration with the same 'sr', 'sf_i' and 'sf_endpoint' values.
+
+    * Response Body Example:
+
+        ```json
+        {
+          "location": "location_1",
+          "sfc": "sfc_1",
+          "sfc_i": "sfc_i_1",
+          "sf": "sf_1",
+          "sf_i": "sf_i_1",
+          "sf_endpoint": "sf_endpoint_1",
+          "sr": "sr_1"
+        }
+        ```
+
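+    * Example (hypothetical):
+
+        Assuming the request body above is saved to `endpoint.json` and the service is reachable on localhost:9080, the configuration could be created with curl:
+
+        ```shell
+        curl -X POST -H "Content-Type: application/json" -d @endpoint.json "http://localhost:9080/whoami/endpoints"
+        ```
+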
+* **PUT** ***/whoami/endpoints/instance?sr={sr_id}&sf_i={sf_instance_id}&sf_endpoint={sf_endpoint_id}***
+
+    This API method replaces the uniquely defined service function endpoint configuration associated with the given URL parameters - sr, sf_i and sf_endpoint with a new service
+    function endpoint configuration given in the request body (JSON format). It can also be used for updating.
+
+    * Request:
+
+        Expects a JSON-formatted request body with the new service function endpoint configuration.
+
+    * Request Body Example:
+
+        ```json
+        {
+          "location": "location_2",
+          "sfc": "sfc_1",
+          "sfc_i": "sfc_i_1",
+          "sf": "sf_1",
+          "sf_i": "sf_i_1",
+          "sf_endpoint": "sf_endpoint_1",
+          "sr": "sr_1"
+        }
+        ```
+
+    * Response
+
+        Returns a JSON-formatted response - a JSON object representing the new service function endpoint configuration that was created (updated).
+
+        Returns a 400 Bad Request error if the request body is invalid.
+
+        Returns a 400 Bad Request error if the URL parameters are invalid.
+
+        Returns a 404 Not Found error if there is no service function endpoint configuration associated with the given URL parameters.
+
+        Returns a 409 Conflict error if there exists another service function endpoint configuration with the same 'sr', 'sf_i' and 'sf_endpoint' values as the ones in the request body.
+
+    * Response Body Example:
+
+        - Request made to /whoami/endpoints/instance?sr=sr_1&sf_i=sf_i_1&sf_endpoint=sf_endpoint_1
+        ```json
+        {
+          "location": "location_2",
+          "sfc": "sfc_1",
+          "sfc_i": "sfc_i_1",
+          "sf": "sf_1",
+          "sf_i": "sf_i_1",
+          "sf_endpoint": "sf_endpoint_1",
+          "sr": "sr_1"
+        }
+        ```
+
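+    * Example (hypothetical):
+
+        Assuming the new configuration is saved to `endpoint.json`, the existing configuration could be replaced with curl:
+
+        ```shell
+        curl -X PUT -H "Content-Type: application/json" -d @endpoint.json "http://localhost:9080/whoami/endpoints/instance?sr=sr_1&sf_i=sf_i_1&sf_endpoint=sf_endpoint_1"
+        ```
+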
+* **DELETE** ***/whoami/endpoints/instance?sr={sr_id}&sf_i={sf_instance_id}&sf_endpoint={sf_endpoint_id}***
+
+    This API method deletes the uniquely defined service function endpoint configuration associated with the given URL parameters - sr, sf_i and sf_endpoint.
+
+    * Response:
+
+        Returns the JSON representation of the deleted object.
+
+        Returns a 404 Not Found error if there is no service function endpoint configuration associated with the given URL parameters.
+
+        Returns a 400 Bad Request error if the URL parameters are invalid.
+
+    * Response Body Example:
+
+        - Request made to /whoami/endpoints/instance?sr=sr_1&sf_i=sf_i_1&sf_endpoint=sf_endpoint_1
+        ```json
+        {
+          "location": "location_1",
+          "sfc": "sfc_1",
+          "sfc_i": "sfc_i_1",
+          "sf": "sf_1",
+          "sf_i": "sf_i_1",
+          "sf_endpoint": "sf_endpoint_1",
+          "sr": "sr_1"
+        }
+        ```
+
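+    * Example (hypothetical):
+
+        The same request expressed with curl (host and port are assumptions):
+
+        ```shell
+        curl -X DELETE "http://localhost:9080/whoami/endpoints/instance?sr=sr_1&sf_i=sf_i_1&sf_endpoint=sf_endpoint_1"
+        ```
+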
+## Installing and running the CLMC service
 
 Before installing the CLMC service and its dependencies, it is recommended to use a python virtual environment. To easily
 manage virtual environments, **virtualenvwrapper** can be used.
 
-```
-pip install virtualenvwrapper
+```shell
+pip3 install virtualenvwrapper
 ```
 
 To create a virtual environment use the **mkvirtualenv** command:
 
-```
+```shell
 mkvirtualenv CLMC
 ```
 
 When created, you should already be set to use the new virtual environment, but to make sure of this use the **workon** command:
 
-```
+```shell
 workon CLMC
 ```
 
-Now, any installed libraries will be installed relative to this environment only. 
+Now, any installed libraries will be installed relative to this environment only.
 
-The easiest way to install and use the CLMC service locally is to use **pip**. Navigate to the clmc-webservice folder:
-```
-cd src/clmc-webservice
+The easiest way to install and use the CLMC service locally is to use **pip**. Navigate to the clmc-service folder:
+
+```shell
+cd src/service
 ```
 
 Test the CLMC service using **tox** along with the ***tox.ini*** configuration file. If tox is not installed run:
 
-```
-pip install tox
+```shell
+pip3 install tox
 ```
 
 After it is installed, simply use the **tox** command:
 
-```
+```shell
 tox
 ```
 
-Then install the service in development mode.
+Then install the service.
 
-```
-pip install -e .
+```shell
+pip3 install .
 ```
 
-Finally, start the service on localhost by using pyramid's **pserve**:
+Finally, start the service on localhost by using Pyramid's **pserve** command line utility:
 
-```
-pserve development.ini --reload
+```shell
+pserve production.ini
 ```
 
-You should now be able to make requests to the CLMC service on http://localhost:9080/aggregator/config and http://localhost:9080/aggregator/control.
+You should now be able to make requests to the CLMC service on the various API endpoints.
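+
+For example, a quick smoke test against the aggregator configuration endpoint (a sketch, assuming the service listens on localhost:9080 as in the development configuration):
+
+```shell
+curl http://localhost:9080/aggregator/config
+```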
diff --git a/scripts/clmc-service/install-clmc-service.sh b/scripts/clmc-service/install-clmc-service.sh
index c1d93d99b34af7389ea81ec26868cbd19142f304..cd869501c403928d54b45947c07afaf5a82f239b 100755
--- a/scripts/clmc-service/install-clmc-service.sh
+++ b/scripts/clmc-service/install-clmc-service.sh
@@ -3,7 +3,7 @@
 # Get command line parameters
 if [ "$#" -ne 3 ]; then
     echo "Error: illegal number of arguments: "$#
-    echo "Usage: install.sh INFLUX_URL DATABASE_NAME REPORT_PERIOD"
+    echo "Usage: install-clmc-service.sh INFLUX_URL DATABASE_NAME REPORT_PERIOD"
     exit 1 
 fi
 
@@ -11,6 +11,25 @@ INFLUX_URL=$1
 DATABASE_NAME=$2
 REPORT_PERIOD=$3
 
+apt-get update
+
+# Create the database for the WHOAMI API
+apt-get install -y postgresql postgresql-contrib
+sudo -u postgres bash -c "psql -c \"CREATE USER clmc WITH PASSWORD 'clmc_service';\""
+sudo -u postgres bash -c "psql -c \"ALTER USER clmc CREATEDB;\""
+sudo -u postgres createdb whoamidb
+sudo -u postgres bash -c "psql -c \"GRANT ALL PRIVILEGES ON DATABASE \"whoamidb\" to clmc;\""
+
+# install virtualenvwrapper to manage python environments - and check
+echo "----> Installing Python3, Pip3 and curl"
+apt-get install -y python3 python3-pip curl
+update-alternatives --install /usr/bin/python python /usr/bin/python3 10
+
+echo "----> Installing virtualenv and wrapper"
+apt-get install -y python3-virtualenv virtualenvwrapper
+pip3 install virtualenv
+pip3 install virtualenvwrapper
+
 ## CLMC-SERVICE
 ## ----------------------------------------------------------------------------------
 echo "----> Configuring virtualenvwrapper"
@@ -50,10 +69,10 @@ if [ $? -ne 0 ] ; then
 fi
 
 # navigate to the clmc-webservice - and check
-echo "----> Moving to CLMC webservice"
+echo "----> Moving to CLMC service"
 cd ${REPO_ROOT}/src/service
 if [ $? -ne 0 ] ; then
-        echo "Failed: could not find clmc-webservice"
+        echo "Failed: could not find clmc-service"
 		exit 1
 fi
 
@@ -80,6 +99,14 @@ fi
 echo "----> Creating CLMC web service log directory"
 mkdir -p /var/log/flame/clmc
 
+# initialise the CLMC service database with the model tables
+echo "----> Initialising CLMC database"
+initialize_clmcservice_db production.ini
+if [ $? -ne 0 ] ; then
+        echo "Failed: switching to CLMC python environment"
+		exit 1
+fi
+
 # Install minioclmc as systemctl service
 # -----------------------------------------------------------------------
 mkdir -p /opt/flame/clmc
diff --git a/scripts/clmc-service/install-tick-stack.sh b/scripts/clmc-service/install-tick-stack.sh
index f52fce9a09827b71237ba915eb0543acaa2fd9a1..d2f639d68218b22e69d9c6bea6b9503770ed1706 100755
--- a/scripts/clmc-service/install-tick-stack.sh
+++ b/scripts/clmc-service/install-tick-stack.sh
@@ -11,16 +11,9 @@ KAPACITOR_CHECKSUM=eea9b215f241906570eafe3857e1d4c5
 CHRONOGRAF_VERSION=1.4.4.2
 CHRONOGRAF_CHECKSUM=eea6915aa6db8f134fcd3b095e863b773bfb3a16a26e346dd65904a07df97963
 
-# install virtualenvwrapper to manage python environments - and check
 apt-get update
-echo "----> Installing Python3 and Pip3"
-apt-get install -y python3 python3-pip wget curl
-update-alternatives --install /usr/bin/python python /usr/bin/python3 10
-
-echo "----> Installing virtualenv and wrapper"
-apt-get install -y python3-virtualenv virtualenvwrapper
-pip3 install virtualenv
-pip3 install virtualenvwrapper
+echo "----> Installing wget"
+apt-get install -y wget
 
 # install influx
 echo "----> Installing InfluxDB"
diff --git a/scripts/clmc-service/install.sh b/scripts/clmc-service/install.sh
index 49abe7af6e48fc6d1b05852f40e071c3f93106f2..420dddab39d17444c001114ee0427b7167e4b60d 100755
--- a/scripts/clmc-service/install.sh
+++ b/scripts/clmc-service/install.sh
@@ -25,7 +25,14 @@
 #/////////////////////////////////////////////////////////////////////////
 
-# Force fail on command fail (off for now as virtualenvwrapper install fails)
-# set -euo pipefail
+# Force fail on command fail
+set -euo pipefail
+
+# Get command line parameters
+if [ "$#" -ne 3 ]; then
+    echo "Error: illegal number of arguments: "$#
+    echo "Usage: install.sh INFLUX_URL DATABASE_NAME REPORT_PERIOD"
+    exit 1
+fi
 
 # Ensure everything runs in directory of the parent script
 cd `dirname $0`
@@ -33,9 +40,5 @@ cd `dirname $0`
 echo "Provisioning CLMC service"
 
 ./install-tick-stack.sh $@
-./install-clmc-service.sh $@
 ./install-neo4j.sh $@
-
-
-
-
+./install-clmc-service.sh $@
diff --git a/scripts/test/fixture.sh b/scripts/test/fixture.sh
index 0c062ddea21eefbb48545913e8013c6782dd95bd..0aa2f4c0d55663007a87daa01e5f8fd7a5783057 100755
--- a/scripts/test/fixture.sh
+++ b/scripts/test/fixture.sh
@@ -1,7 +1,13 @@
 #!/bin/bash
 
+repo_root="/vagrant"
+config_file="rspec.json"
+
 usage() {
-    echo "Usage: $0 create|start|stop|destroy [-f config_file] [-r repo_root] [-c service_name]" 1>&2
+    echo "Usage: $0 create|start|stop|destroy [-f config_file] [-r repo_root] [-c container_name|all]" 1>&2
+    echo "    -f defaults to '${config_file}'" 1>&2
+    echo "    -r defaults to '${repo_root}'" 1>&2
+    echo "    -c must be specified, use 'all' for all" 1>&2
     exit 1
 }
 
@@ -43,18 +49,27 @@ create() {
 
         # provision software into each container
         echo "Provisioning: ${service_name}"
-        if [ ${service_name} == "clmc-service" ]
-        then
+        if [ ${service_name} == "clmc-service" ]; then
             influxdb_url=$(echo $SERVICE | jq -r '.influxdb_url')
             database_name=$(echo $SERVICE | jq -r '.database_name')
             report_period=$(echo $SERVICE | jq -r '.report_period')
             cmd="/vagrant/scripts/clmc-service/install.sh ${influxdb_url} ${database_name} ${report_period}"
             echo "Provisioning command ${cmd}"
             lxc exec ${service_name} --env REPO_ROOT="/vagrant" -- ${cmd}
+            exit_code=$?
+            if [ $exit_code != 0 ]; then
+                echo "clmc-service installation failed with exit code ${exit_code}"
+                exit 1
+            fi
         elif [ ${service_name} == "test-runner" ]
         then
             cmd=/vagrant/src/test/clmctest/services/pytest/install.sh
             lxc exec ${service_name} -- ${cmd}
+            exit_code=$?
+            if [ $exit_code != 0 ]; then
+                echo "test-runner installation failed with exit code ${exit_code}"
+                exit 1
+            fi
         else
             # get container parameters
             location=$(echo $SERVICE | jq -r '.location')
@@ -70,11 +85,21 @@ create() {
             # install service function specific software
             cmd=/vagrant/src/test/clmctest/services/${sf_id}/install.sh
             lxc exec ${service_name} --env REPO_ROOT="/vagrant" -- ${cmd}
-
+            exit_code=$?
+            if [ $exit_code != 0 ]; then
+                echo "${sf_id} installation failed with exit code ${exit_code}"
+                exit 1
+            fi
             # install telegraf
             cmd=/vagrant/scripts/clmc-agent/install.sh
             lxc exec ${service_name} --env REPO_ROOT="/vagrant" -- /vagrant/scripts/clmc-agent/install.sh
 
+            # check that telegraf installed (it may not have if the reporc file was not present or Nexus server was down)
+            if lxc-attach -n ${service_name} -- ls /etc/telegraf |& grep 'ls: cannot access'; then
+                echo "Telegraf agent failed to install (check reporc?)"
+                exit 1
+            fi
+
             # stop telegraf before changing the configs
             lxc exec ${service_name} -- service telegraf stop
 
@@ -156,9 +181,6 @@ if [ $# -gt 1 ]; then
     OPTIND=$((OPTIND+1))
 fi
 
-repo_root="/vagrant"
-config_file="rspec.json"
-
 # collect the optional arguments
 while getopts "hf:r:c:" opt; do
   case $opt in
@@ -184,12 +206,19 @@ if [ ! -d ${repo_root} ]; then
     exit 1
 fi
 
+# check a container has been specified
+if [ -z "${container}" ]; then
+    echo "A container must be specified; use 'all' for all containers"
+    usage
+    exit 1
+fi
+
 # iterate of list of services in configuration file
 command=$1
 service_names=$(jq -r '.[].name' ${config_file})
 for service_name in $service_names; do
     # if all or specific container
-    if [ -z ${container} ] || [ ${container} == ${service_name} ]; then
+    if [ "${container}" = "all" ] || [ "${container}" = "${service_name}" ]; then
         case "${command}" in
             create)
                 create ${service_name} ${config_file} ${repo_root}
@@ -210,6 +239,8 @@ for service_name in $service_names; do
     fi
 done
 
-echo "------>Create iptables summary"
+
+
+echo -e "\n\n------> Create iptables summary"
 iptables -t nat -L -n -v
 iptables-save > /etc/iptables/rules.v4
diff --git a/src/service/.coveragerc b/src/service/.coveragerc
index a3edc11a103c310fff7b5b2dc3ef5937f1dcfa0d..9f2b9eaf4f0eb7178a1f8be6f9e3eeeb4c4ccbf4 100644
--- a/src/service/.coveragerc
+++ b/src/service/.coveragerc
@@ -1,3 +1,9 @@
 [run]
 source = clmcservice
-omit = clmcservice/tests.py
+omit =
+        *test*
+        *__init__*
+        clmcservice/aggregation/influx_data_interface.py
+        clmcservice/configapi/views.py
+        clmcservice/whoamiapi/views.py
+# configapi/views and whoamiapi/views are currently omitted since there is no implementation there yet
diff --git a/src/service/clmcservice/__init__.py b/src/service/clmcservice/__init__.py
index 3473f9c9e91d777883c4a01d8a703f99643b677e..ff5cee802accce0f0e9ac0e689abe2e9ac735a64 100644
--- a/src/service/clmcservice/__init__.py
+++ b/src/service/clmcservice/__init__.py
@@ -1,3 +1,4 @@
+#!/usr/bin/python3
 """
 // © University of Southampton IT Innovation Centre, 2018
 //
@@ -21,8 +22,12 @@
 //      Created for Project :   FLAME
 """
 
+from os import path
+from json import load
 from pyramid.config import Configurator
-from clmcservice.utilities import RUNNING_FLAG, MALFORMED_FLAG
+from sqlalchemy import engine_from_config
+from clmcservice.models.meta import DBSession, Base
+from clmcservice.aggregationapi.utilities import validate_conf_file, RUNNING_FLAG, MALFORMED_FLAG, CONF_FILE_ATTRIBUTE, CONF_OBJECT, AGGREGATOR_CONFIG_SECTION
 
 
 def main(global_config, **settings):
@@ -30,24 +35,41 @@ def main(global_config, **settings):
     This function returns a Pyramid WSGI application.
     """
 
-    # a conversion is necessary so that the configuration values of the aggregator are stored with the right type instead of strings
-    aggregator_report_period = int(settings.get('aggregator_report_period', 5))
-    settings['aggregator_report_period'] = aggregator_report_period
+    engine = engine_from_config(settings, 'sqlalchemy.')  # initialise a database engine by using the 'sqlalchemy' setting in the configuration .ini file
+    DBSession.configure(bind=engine)  # bind the engine to a DB session
+    Base.metadata.bind = engine  # bind the engine to the Base class metadata
+
+    # validate and use (if valid) the configuration file
+    conf_file_path = settings[CONF_FILE_ATTRIBUTE]
+    conf = validate_conf_file(conf_file_path)  # if None returned here, service is in unconfigured state
+    settings[CONF_OBJECT] = conf
 
     settings[MALFORMED_FLAG] = False
+    settings['influx_port'] = int(settings['influx_port'])  # the influx port setting must be converted to an integer, as it is read from the .ini file as a string
+
+    network_config_file_path = settings["network_configuration_path"]
+    with open(network_config_file_path) as f:
+        network = load(f)
+        settings["network_bandwidth"] = network["bandwidth"]
 
     config = Configurator(settings=settings)
 
+    # add routes of the aggregator API
     config.add_route('aggregator_config', '/aggregator/config')
-    config.add_view('clmcservice.views.AggregatorConfig', attr='get', request_method='GET')
-    config.add_view('clmcservice.views.AggregatorConfig', attr='put', request_method='PUT')
-
     config.add_route('aggregator_controller', '/aggregator/control')
-    config.add_view('clmcservice.views.AggregatorController', attr='get', request_method='GET')
-    config.add_view('clmcservice.views.AggregatorController', attr='put', request_method='PUT')
 
-    config.add_route('round_trip_time_query', '/query/round-trip-time')
-    config.add_view('clmcservice.views.RoundTripTimeQuery', attr='get', request_method='GET')
+    # add routes of the WHOAMI API
+    config.add_route('whoami_endpoints', '/whoami/endpoints')
+    config.add_route('whoami_endpoints_instance', '/whoami/endpoints/instance')
+
+    # add routes of the CONFIG API
+    config.add_route('config_sfc', '/config/sf-chains')
+    config.add_route('config_sfc_instance', '/config/sf-chains/instance')
+
+    # add routes of the GRAPH API
+    config.add_route('graph_build', '/graph/temporal')
+    config.add_route('graph_manage', '/graph/temporal/{graph_id}')
+    config.add_route('graph_algorithms_rtt', '/graph/temporal/{graph_id}/round-trip-time')
 
-    config.scan()
+    config.scan()  # This method scans the packages and finds any views related to the routes added in the app configuration
     return config.make_wsgi_app()
diff --git a/src/service/clmcservice/aggregation/__init__.py b/src/service/clmcservice/aggregation/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..8b137891791fe96927ad78e64b0aad7bded08bdc 100644
--- a/src/service/clmcservice/aggregation/__init__.py
+++ b/src/service/clmcservice/aggregation/__init__.py
@@ -0,0 +1 @@
+
diff --git a/src/service/clmcservice/aggregation/aggregator.py b/src/service/clmcservice/aggregation/aggregator.py
index f0a00d3a311b843b1dedc8e8f93227c76843af80..1199562d9b72240bdbeb9d9d29f0ee99b6a19f1c 100644
--- a/src/service/clmcservice/aggregation/aggregator.py
+++ b/src/service/clmcservice/aggregation/aggregator.py
@@ -26,7 +26,7 @@ from threading import Thread, Event
 from influxdb import InfluxDBClient
 from time import time, sleep
 from urllib.parse import urlparse
-from clmcservice.utilities import generate_e2e_delay_report
+from clmcservice.aggregationapi.utilities import generate_e2e_delay_report
 import getopt
 import logging
 
@@ -223,23 +223,11 @@ class AggregatorThread(Thread):
 
         self.aggregator.stop()
 
-    def add_event_lock(self, event):
-        """
-         Auxiliary method to set a thread-safe event lock object to the aggregator (used for testing).
-
-         :param event: the event lock object
-         """
-
-        setattr(self, 'event', event)
-
     def run(self):
         """
         The method to execute when the thread starts.
         """
 
-        if hasattr(self, 'event'):
-            self.event.set()
-
         self.aggregator.run()
 
 
diff --git a/src/service/clmcservice/aggregation/influx_data_interface.py b/src/service/clmcservice/aggregation/influx_data_interface.py
index f375469a13502a08efbf5756866aa7b2d58f1799..c6781d0c35d7d77c68932499591e9e67968c637d 100644
--- a/src/service/clmcservice/aggregation/influx_data_interface.py
+++ b/src/service/clmcservice/aggregation/influx_data_interface.py
@@ -23,7 +23,7 @@
 """
 
 
-from clmcservice.utilities import generate_e2e_delay_report
+from clmcservice.aggregationapi.utilities import generate_e2e_delay_report
 
 """
 A python module which provides auxiliary functions to mimic the behaviour of an InfluxDBClient when unit testing the aggregator.
diff --git a/src/service/clmcservice/aggregationapi/__init__.py b/src/service/clmcservice/aggregationapi/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/src/service/clmcservice/aggregationapi/__init__.py
@@ -0,0 +1 @@
+
diff --git a/src/service/clmcservice/tests.py b/src/service/clmcservice/aggregationapi/tests.py
similarity index 51%
rename from src/service/clmcservice/tests.py
rename to src/service/clmcservice/aggregationapi/tests.py
index d9980cb7ff8767e61842269d24763a287fbe255e..086f3d378bc500f590ee386c6a50b246b04dac37 100644
--- a/src/service/clmcservice/tests.py
+++ b/src/service/clmcservice/aggregationapi/tests.py
@@ -1,3 +1,4 @@
+#!/usr/bin/python3
 """
 // © University of Southampton IT Innovation Centre, 2018
 //
@@ -21,13 +22,14 @@
 //      Created for Project :   FLAME
 """
 
-import pytest
 from pyramid import testing
 from pyramid.httpexceptions import HTTPBadRequest
 from time import sleep
-from clmcservice.utilities import CONFIG_ATTRIBUTES, PROCESS_ATTRIBUTE, RUNNING_FLAG, MALFORMED_FLAG, URL_REGEX
+from clmcservice.aggregationapi.utilities import CONF_FILE_ATTRIBUTE, CONF_OBJECT, AGGREGATOR_CONFIG_SECTION, CONFIG_ATTRIBUTES, PROCESS_ATTRIBUTE, RUNNING_FLAG, MALFORMED_FLAG, URL_REGEX
+import pytest
 import os
 import signal
+import configparser
 
 
 class TestAggregatorAPI(object):
@@ -41,9 +43,10 @@ class TestAggregatorAPI(object):
         A fixture to implement setUp/tearDown functionality for all tests by initializing configuration structure for the web service
         """
 
-        self.config = testing.setUp()
-        self.config.add_settings({'aggregator_running': False, 'malformed': False, 'aggregator_report_period': 5,
-                                  'aggregator_database_name': 'CLMCMetrics', 'aggregator_database_url': "http://172.40.231.51:8086"})
+        self.registry = testing.setUp()
+        config = configparser.ConfigParser()
+        config[AGGREGATOR_CONFIG_SECTION] = {'aggregator_report_period': 5, 'aggregator_database_name': 'CLMCMetrics', 'aggregator_database_url': "http://172.40.231.51:8086"}
+        self.registry.add_settings({'configuration_object': config, 'aggregator_running': False, 'malformed': False, 'configuration_file_path': "/etc/flame/clmc/service.conf"})
 
         yield
 
@@ -54,11 +57,25 @@ class TestAggregatorAPI(object):
         Tests the GET method for the configuration of the aggregator.
         """
 
-        from clmcservice.views import AggregatorConfig  # nested import so that importing the class view is part of the test itself
+        from clmcservice.aggregationapi.views import AggregatorConfig  # nested import so that importing the class view is part of the test itself
+
+        setup_config = self.registry.get_settings()[CONF_OBJECT]
+
+        # test an error is thrown when aggregator is in unconfigured state
+        self.registry.get_settings()[CONF_OBJECT] = None
+        request = testing.DummyRequest()
+        error_raised = False
+        try:
+            AggregatorConfig(request).get()
+        except HTTPBadRequest:
+            error_raised = True
+        assert error_raised, "Error must be raised in case of a not configured aggregator."
+        self.registry.get_settings()[CONF_OBJECT] = setup_config
 
-        assert self.config.get_settings().get('aggregator_report_period') == 5, "Initial report period is 5 seconds."
-        assert self.config.get_settings().get('aggregator_database_name') == 'CLMCMetrics', "Initial database name the aggregator uses is CLMCMetrics."
-        assert self.config.get_settings().get('aggregator_database_url') == "http://172.40.231.51:8086", "Initial aggregator url is http://172.40.231.51:8086"
+        # test GET method when aggregator is configured
+        assert int(self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_report_period')) == 5, "Initial report period is 5 seconds."
+        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_name') == 'CLMCMetrics', "Initial database name the aggregator uses is CLMCMetrics."
+        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_url') == "http://172.40.231.51:8086", "Initial aggregator url is http://172.40.231.51:8086"
 
         request = testing.DummyRequest()
         response = AggregatorConfig(request).get()
@@ -67,9 +84,9 @@ class TestAggregatorAPI(object):
                             'aggregator_database_name': 'CLMCMetrics',
                             'aggregator_database_url': "http://172.40.231.51:8086"}, "Response must be a dictionary representing a JSON object with the correct configuration data of the aggregator."
 
-        assert self.config.get_settings().get('aggregator_report_period') == 5, "A GET request must not modify the aggregator configuration data."
-        assert self.config.get_settings().get('aggregator_database_name') == 'CLMCMetrics', "A GET request must not modify the aggregator configuration data."
-        assert self.config.get_settings().get('aggregator_database_url') == "http://172.40.231.51:8086", "A GET request must not modify the aggregator configuration data."
+        assert int(self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_report_period')) == 5, "A GET request must not modify the aggregator configuration data."
+        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_name') == 'CLMCMetrics', "A GET request must not modify the aggregator configuration data."
+        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_url') == "http://172.40.231.51:8086", "A GET request must not modify the aggregator configuration data."
 
     @pytest.mark.parametrize("input_body, output_value", [
         ('{"aggregator_report_period": 10, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://171.40.231.51:8086"}',
@@ -99,12 +116,12 @@ class TestAggregatorAPI(object):
         :param output_value: the expected output value, None for expecting an Exception
         """
 
-        from clmcservice.views import AggregatorConfig, AggregatorController  # nested import so that importing the class view is part of the test itself
+        from clmcservice.aggregationapi.views import AggregatorConfig, AggregatorController  # nested import so that importing the class view is part of the test itself
 
-        assert not AggregatorController.is_process_running(self.config.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
-        assert self.config.get_settings().get('aggregator_report_period') == 5, "Initial report period is 5 seconds."
-        assert self.config.get_settings().get('aggregator_database_name') == 'CLMCMetrics', "Initial database name the aggregator uses is CLMCMetrics."
-        assert self.config.get_settings().get('aggregator_database_url') == "http://172.40.231.51:8086", "Initial aggregator url is http://172.40.231.51:8086"
+        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
+        assert int(self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_report_period')) == 5, "Initial report period is 5 seconds."
+        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_name') == 'CLMCMetrics', "Initial database name the aggregator uses is CLMCMetrics."
+        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_url') == "http://172.40.231.51:8086", "Initial aggregator url is http://172.40.231.51:8086"
 
         request = testing.DummyRequest()
         request.body = input_body.encode(request.charset)
@@ -114,9 +131,19 @@ class TestAggregatorAPI(object):
             assert response == output_value, "Response of PUT request must include the new configuration of the aggregator"
 
             for attribute in CONFIG_ATTRIBUTES:
-                assert self.config.get_settings().get(attribute) == output_value.get(attribute), "Aggregator settings configuration is not updated."
+                assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION][attribute] == str(output_value[attribute]), "Aggregator settings configuration is not updated."
+
+            assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Aggregator running status should not be updated after a configuration update."
+
+            # assert that the conf file is updated
+            updated_conf = configparser.ConfigParser()
+            conf_file = self.registry.get_settings().get(CONF_FILE_ATTRIBUTE)
+            assert updated_conf.read(conf_file) == [conf_file]
+            assert AGGREGATOR_CONFIG_SECTION in updated_conf.sections()
+
+            for attribute in CONFIG_ATTRIBUTES:
+                assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION][attribute] == updated_conf[AGGREGATOR_CONFIG_SECTION][attribute], "Aggregator settings configuration is not updated."
 
-            assert not AggregatorController.is_process_running(self.config.get_settings().get(PROCESS_ATTRIBUTE)), "Aggregator running status should not be updated after a configuration update."
         else:
             error_raised = False
             try:
@@ -131,19 +158,34 @@ class TestAggregatorAPI(object):
         Tests starting the aggregator through an API call.
         """
 
-        from clmcservice.views import AggregatorController  # nested import so that importing the class view is part of the test itself
+        from clmcservice.aggregationapi.views import AggregatorController  # nested import so that importing the class view is part of the test itself
 
-        assert not AggregatorController.is_process_running(self.config.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
-        assert self.config.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
+        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
+        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
 
+        # test starting the aggregator when in an unconfigured state
+        setup_config = self.registry.get_settings()[CONF_OBJECT]
+        self.registry.get_settings()[CONF_OBJECT] = None
+        request = testing.DummyRequest()
+        input_body = '{"action": "start"}'
+        request.body = input_body.encode(request.charset)
+        error_raised = False
+        try:
+            AggregatorController(request).put()
+        except HTTPBadRequest:
+            error_raised = True
+        assert error_raised, "Error must be raised in case of an unconfigured aggregator."
+        self.registry.get_settings()[CONF_OBJECT] = setup_config
+
+        # test starting the aggregator when in a configured state
         request = testing.DummyRequest()
         input_body = '{"action": "start"}'
         request.body = input_body.encode(request.charset)
 
         response = AggregatorController(request).put()
         assert response == {RUNNING_FLAG: True}, "The aggregator should have been started."
-        assert AggregatorController.is_process_running(self.config.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been started."
-        assert self.config.get_settings().get(PROCESS_ATTRIBUTE) is not None, "Aggregator process should have been initialized."
+        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been started."
+        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is not None, "Aggregator process should have been initialized."
 
         # kill the started process after the test is over
         pid = request.registry.settings[PROCESS_ATTRIBUTE].pid
@@ -154,18 +196,33 @@ class TestAggregatorAPI(object):
         Tests stopping the aggregator through an API call.
         """
 
-        from clmcservice.views import AggregatorController  # nested import so that importing the class view is part of the test itself
+        from clmcservice.aggregationapi.views import AggregatorController  # nested import so that importing the class view is part of the test itself
+
+        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
+        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
 
-        assert not AggregatorController.is_process_running(self.config.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
-        assert self.config.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
+        # test stopping the aggregator when in an unconfigured state
+        setup_config = self.registry.get_settings()[CONF_OBJECT]
+        self.registry.get_settings()[CONF_OBJECT] = None
+        request = testing.DummyRequest()
+        input_body = '{"action": "stop"}'
+        request.body = input_body.encode(request.charset)
+        error_raised = False
+        try:
+            AggregatorController(request).put()
+        except HTTPBadRequest:
+            error_raised = True
+        assert error_raised, "Error must be raised in case of an unconfigured aggregator."
+        self.registry.get_settings()[CONF_OBJECT] = setup_config

+        # test stopping the aggregator when in a configured state
         # send a start request to trigger the aggregator
         request = testing.DummyRequest()
         input_body = '{"action": "start"}'
         request.body = input_body.encode(request.charset)
         AggregatorController(request).put()
-        assert self.config.get_settings().get(PROCESS_ATTRIBUTE) is not None, "Aggregator process should have been initialized."
-        assert AggregatorController.is_process_running(self.config.get_settings().get(PROCESS_ATTRIBUTE)), "Aggregator process should have been initialized."
+        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is not None, "Aggregator process should have been initialized."
+        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Aggregator process should have been initialized."
 
         # test stopping the aggregator process when it is running
         request = testing.DummyRequest()
@@ -174,8 +231,8 @@ class TestAggregatorAPI(object):
 
         response = AggregatorController(request).put()
         assert response == {RUNNING_FLAG: False}, "The aggregator should have been stopped."
-        assert not AggregatorController.is_process_running(self.config.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been stopped."
-        assert self.config.get_settings().get(PROCESS_ATTRIBUTE) is None, "Aggregator process should have been terminated."
+        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been stopped."
+        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Aggregator process should have been terminated."
 
         sleep(2)  # put a 2 seconds timeout so that the aggregator process can terminate
 
@@ -186,19 +243,34 @@ class TestAggregatorAPI(object):
 
         response = AggregatorController(request).put()
         assert response == {RUNNING_FLAG: False}, "The aggregator should have been stopped."
-        assert  not AggregatorController.is_process_running(self.config.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been stopped."
-        assert self.config.get_settings().get(PROCESS_ATTRIBUTE) is None, "Aggregator process should have been terminated."
+        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been stopped."
+        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Aggregator process should have been terminated."
 
     def test_restart(self):
         """
         Tests restarting the aggregator through an API call.
         """
 
-        from clmcservice.views import AggregatorController  # nested import so that importing the class view is part of the test itself
+        from clmcservice.aggregationapi.views import AggregatorController  # nested import so that importing the class view is part of the test itself
 
-        assert not AggregatorController.is_process_running(self.config.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
-        assert self.config.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
+        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
+        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
+
+        # test restarting the aggregator when in an unconfigured state
+        setup_config = self.registry.get_settings()[CONF_OBJECT]
+        self.registry.get_settings()[CONF_OBJECT] = None
+        request = testing.DummyRequest()
+        input_body = '{"action": "restart"}'
+        request.body = input_body.encode(request.charset)
+        error_raised = False
+        try:
+            AggregatorController(request).put()
+        except HTTPBadRequest:
+            error_raised = True
+        assert error_raised, "Error must be raised in case of an unconfigured aggregator."
+        self.registry.get_settings()[CONF_OBJECT] = setup_config

+        # test restarting the aggregator when in a configured state
         # test restarting the aggregator process when it is stopped
         request = testing.DummyRequest()
         input_body = '{"action": "restart"}'
@@ -206,8 +278,8 @@ class TestAggregatorAPI(object):
 
         response = AggregatorController(request).put()
         assert response == {RUNNING_FLAG: True}, "The aggregator should have been restarted."
-        assert AggregatorController.is_process_running(self.config.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been restarted."
-        assert self.config.get_settings().get(PROCESS_ATTRIBUTE), "The aggregator process should have been reinitialised."
+        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been restarted."
+        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE), "The aggregator process should have been reinitialised."
 
         # test restarting the aggregator process when it is running
         request = testing.DummyRequest()
@@ -216,8 +288,8 @@ class TestAggregatorAPI(object):
 
         response = AggregatorController(request).put()
         assert response == {RUNNING_FLAG: True}, "The aggregator should have been restarted."
-        assert AggregatorController.is_process_running(self.config.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been restarted."
-        assert self.config.get_settings().get(PROCESS_ATTRIBUTE), "The aggregator process should have been reinitialised."
+        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been restarted."
+        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE), "The aggregator process should have been reinitialised."
 
         # kill the started process after the test is over
         pid = request.registry.settings[PROCESS_ATTRIBUTE].pid
@@ -237,10 +309,10 @@ class TestAggregatorAPI(object):
         Tests sending a malformed type of action to the aggregator through an API call.
         """
 
-        from clmcservice.views import AggregatorController  # nested import so that importing the class view is part of the test itself
+        from clmcservice.aggregationapi.views import AggregatorController  # nested import so that importing the class view is part of the test itself
 
-        assert not AggregatorController.is_process_running(self.config.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
-        assert self.config.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
+        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
+        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
 
         # test restarting the aggregator process when it is running
         request = testing.DummyRequest()
@@ -260,18 +332,18 @@ class TestAggregatorAPI(object):
         Tests the GET method for the status of the aggregator.
         """
 
-        from clmcservice.views import AggregatorController  # nested import so that importing the class view is part of the test itself
+        from clmcservice.aggregationapi.views import AggregatorController  # nested import so that importing the class view is part of the test itself
 
-        assert not AggregatorController.is_process_running(self.config.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
-        assert self.config.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
+        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
+        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
 
         request = testing.DummyRequest()
         response = AggregatorController(request).get()
 
         assert response == {'aggregator_running': False}, "Response must be a dictionary representing a JSON object with the correct status data of the aggregator."
 
-        assert not AggregatorController.is_process_running(self.config.get_settings().get(PROCESS_ATTRIBUTE)), "A GET request must not modify the aggregator status flag."
-        assert self.config.get_settings().get(PROCESS_ATTRIBUTE) is None, "A GET request must not start the aggregator process."
+        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "A GET request must not modify the aggregator status flag."
+        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "A GET request must not start the aggregator process."
 
         # test status with malformed configuration
         # start the aggregator
@@ -279,7 +351,7 @@ class TestAggregatorAPI(object):
         input_body = '{"action": "start"}'
         request.body = input_body.encode(request.charset)
         AggregatorController(request).put()
-        self.config.get_settings()[MALFORMED_FLAG] = True
+        self.registry.get_settings()[MALFORMED_FLAG] = True
 
         request = testing.DummyRequest()
         response = AggregatorController(request).get()
@@ -289,9 +361,9 @@ class TestAggregatorAPI(object):
                             'comment': 'Aggregator is running in a malformed state - it uses an old version of the configuration. Please, restart it so that the updated configuration is used.'}, \
             "Response must be a dictionary representing a JSON object with the correct status data of the aggregator."
 
-        assert AggregatorController.is_process_running(self.config.get_settings().get(PROCESS_ATTRIBUTE)), "A GET request must not modify the aggregator status flag."
-        assert self.config.get_settings().get(MALFORMED_FLAG), "A GET request must not modify the aggregator malformed flag."
-        assert self.config.get_settings().get(PROCESS_ATTRIBUTE) is not None, "A GET request must not stop the aggregator process."
+        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "A GET request must not modify the aggregator status flag."
+        assert self.registry.get_settings().get(MALFORMED_FLAG), "A GET request must not modify the aggregator malformed flag."
+        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is not None, "A GET request must not stop the aggregator process."
 
         # kill the started process after the test is over
         pid = request.registry.settings[PROCESS_ATTRIBUTE].pid
@@ -302,14 +374,14 @@ class TestAggregatorAPI(object):
         Tests the behaviour of the malformed configuration flag of the aggregator when doing a sequence of API calls.
         """
 
-        from clmcservice.views import AggregatorController, AggregatorConfig  # nested import so that importing the class view is part of the test itself
+        from clmcservice.aggregationapi.views import AggregatorController, AggregatorConfig  # nested import so that importing the class view is part of the test itself
 
-        assert not AggregatorController.is_process_running(self.config.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
-        assert not self.config.get_settings().get(MALFORMED_FLAG), "Initially aggregator is not in a malformed state"
-        assert self.config.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
-        assert self.config.get_settings().get('aggregator_report_period') == 5, "Initial report period is 5 seconds."
-        assert self.config.get_settings().get('aggregator_database_name') == 'CLMCMetrics', "Initial database name the aggregator uses is CLMCMetrics."
-        assert self.config.get_settings().get('aggregator_database_url') == "http://172.40.231.51:8086", "Initial aggregator url is http://172.40.231.51:8086"
+        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "Initially aggregator is not running."
+        assert not self.registry.get_settings().get(MALFORMED_FLAG), "Initially aggregator is not in a malformed state."
+        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running."
+        assert int(self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_report_period')) == 5, "Initial report period is 5 seconds."
+        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_name') == 'CLMCMetrics', "Initial database name the aggregator uses is CLMCMetrics."
+        assert self.registry.get_settings()[CONF_OBJECT][AGGREGATOR_CONFIG_SECTION].get('aggregator_database_url') == "http://172.40.231.51:8086", "Initial aggregator url is http://172.40.231.51:8086"
 
         # start the aggregator with the default configuration
         request = testing.DummyRequest()
@@ -328,9 +400,9 @@ class TestAggregatorAPI(object):
         response = AggregatorConfig(request).put()
         assert response == output_body, "Response of PUT request must include the new configuration of the aggregator"
 
-        assert AggregatorController.is_process_running(self.config.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator shouldn't be stopped when the configuration is updated."
-        assert self.config.get_settings().get(MALFORMED_FLAG), "The malformed flag should be set when the configuration is updated while the process is running."
-        assert self.config.get_settings().get(PROCESS_ATTRIBUTE) is not None, "The aggregator shouldn't be stopped when the configuration is updated."
+        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator shouldn't be stopped when the configuration is updated."
+        assert self.registry.get_settings().get(MALFORMED_FLAG), "The malformed flag should be set when the configuration is updated while the process is running."
+        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is not None, "The aggregator shouldn't be stopped when the configuration is updated."
 
         # check that the malformed flag has been updated through a GET call
         request = testing.DummyRequest()
@@ -346,9 +418,9 @@ class TestAggregatorAPI(object):
         request.body = input_body.encode(request.charset)
         response = AggregatorController(request).put()
         assert response == {RUNNING_FLAG: True}, "The aggregator should have been restarted."
-        assert AggregatorController.is_process_running(self.config.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been restarted."
-        assert not self.config.get_settings().get(MALFORMED_FLAG), "The malformed flag should have been reset to False."
-        assert self.config.get_settings().get(PROCESS_ATTRIBUTE) is not None, "The aggregator should have been restarted."
+        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been restarted."
+        assert not self.registry.get_settings().get(MALFORMED_FLAG), "The malformed flag should have been reset to False."
+        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is not None, "The aggregator should have been restarted."
 
         # update the configuration again while the aggregator is running
         config_body = '{"aggregator_report_period": 30, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://172.50.231.51:8086"}'
@@ -359,20 +431,67 @@ class TestAggregatorAPI(object):
         response = AggregatorConfig(request).put()
         assert response == output_body, "Response of PUT request must include the new configuration of the aggregator"
 
-        assert AggregatorController.is_process_running(self.config.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator shouldn't be stopped when the configuration is updated."
-        assert self.config.get_settings().get(MALFORMED_FLAG), "The malformed flag should be set when the configuration is updated while the process is running."
-        assert self.config.get_settings().get(PROCESS_ATTRIBUTE) is not None, "The aggregator shouldn't be stopped when the configuration is updated."
+        assert AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator shouldn't be stopped when the configuration is updated."
+        assert self.registry.get_settings().get(MALFORMED_FLAG), "The malformed flag should be set when the configuration is updated while the process is running."
+        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is not None, "The aggregator shouldn't be stopped when the configuration is updated."
 
         # stop the aggregator - this should also reset the malformed status flag
-        # restart the aggregator with the new configuration
         request = testing.DummyRequest()
         input_body = '{"action": "stop"}'
         request.body = input_body.encode(request.charset)
         response = AggregatorController(request).put()
         assert response == {RUNNING_FLAG: False}, "The aggregator should have been stopped."
-        assert not AggregatorController.is_process_running(self.config.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been stopped."
-        assert not self.config.get_settings().get(MALFORMED_FLAG), "The malformed flag should have been reset to False."
-        assert self.config.get_settings().get(PROCESS_ATTRIBUTE) is None, "The aggregator should have been stopped."
+        assert not AggregatorController.is_process_running(self.registry.get_settings().get(PROCESS_ATTRIBUTE)), "The aggregator should have been stopped."
+        assert not self.registry.get_settings().get(MALFORMED_FLAG), "The malformed flag should have been reset to False."
+        assert self.registry.get_settings().get(PROCESS_ATTRIBUTE) is None, "The aggregator should have been stopped."
+
+    def test_unconfigured_state(self):
+        """
+        Tests the behaviour of the service when in an unconfigured state.
+        """
+
+        from clmcservice.aggregationapi.views import AggregatorConfig, AggregatorController
+
+        self.registry.get_settings()[CONF_OBJECT] = None  # unconfigured state - conf object is None
+
+        # when doing a GET for the configuration we expect a bad request if the service is in an unconfigured state
+        bad_request = False
+        bad_request_msg = None
+        try:
+            request = testing.DummyRequest()
+            AggregatorConfig(request).get()
+        except HTTPBadRequest as err:
+            bad_request = True
+            bad_request_msg = err.message
+
+        assert bad_request
+        assert bad_request_msg == "Aggregator has not been configured, yet. Send a PUT request to /aggregator/config with a JSON body of the configuration."
+
+        # when doing a PUT for the aggregator to start/stop/restart we expect a bad request if the service is in an unconfigured state
+        for action in ('start', 'stop', 'restart'):
+            bad_request = False
+            bad_request_msg = None
+            try:
+                request = testing.DummyRequest()
+                request.body = ('{"action": "' + action + '"}').encode(request.charset)
+                AggregatorController(request).put()
+            except HTTPBadRequest as err:
+                bad_request = True
+                bad_request_msg = err.message
+
+            assert bad_request
+            assert bad_request_msg == "You must configure the aggregator before controlling it. Send a PUT request to /aggregator/config with a JSON body of the configuration."
+
+        # configure the aggregator
+        input_body = '{"aggregator_report_period": 10, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://171.40.231.51:8086"}'
+        output_body = {'aggregator_report_period': 10, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "http://171.40.231.51:8086"}
+        request = testing.DummyRequest()
+        request.body = input_body.encode(request.charset)
+        response = AggregatorConfig(request).put()
+        assert response == output_body
+
+        request = testing.DummyRequest()
+        assert AggregatorConfig(request).get() == output_body
 
 
 class TestRegexURL(object):
diff --git a/src/service/clmcservice/utilities.py b/src/service/clmcservice/aggregationapi/utilities.py
similarity index 78%
rename from src/service/clmcservice/utilities.py
rename to src/service/clmcservice/aggregationapi/utilities.py
index 44ccffed7ce4120b22c31b55a7b81cde33344f9b..2375300730d47d2f6927961674f8064bc91bfe3a 100644
--- a/src/service/clmcservice/utilities.py
+++ b/src/service/clmcservice/aggregationapi/utilities.py
@@ -1,3 +1,4 @@
+#!/usr/bin/python3
 """
 // © University of Southampton IT Innovation Centre, 2018
 //
@@ -23,7 +24,12 @@
 
 from json import loads
 from re import compile, IGNORECASE
+from configparser import ConfigParser
 
+CONF_FILE_ATTRIBUTE = 'configuration_file_path'  # the attribute pointing to the configuration file path
+CONF_OBJECT = 'configuration_object'  # the attribute, which stores the service configuration object
+
+AGGREGATOR_CONFIG_SECTION = "AGGREGATOR"  # the section in the configuration holding all the configuration attributes declared below
 CONFIG_ATTRIBUTES = ('aggregator_report_period', 'aggregator_database_name', 'aggregator_database_url')  # all of the configuration attributes - to be used as dictionary keys
 
 RUNNING_FLAG = 'aggregator_running'  # Attribute for storing the flag, which shows whether the aggregator is running or not - to be used as a dictionary key
@@ -37,10 +43,6 @@ MALFORMED_FLAG = 'malformed'  # Attribute for storing the flag, which shows whet
 COMMENT_ATTRIBUTE = 'comment'
 COMMENT_VALUE = 'Aggregator is running in a malformed state - it uses an old version of the configuration. Please, restart it so that the updated configuration is used.'
 
-# the attributes of the JSON response body that are expected when querying round trip time
-ROUND_TRIP_ATTRIBUTES = ('media_service', 'start_timestamp', 'end_timestamp')
-
-
 URL_REGEX = compile(
     r'^https?://'  # http:// or https://
     r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain, e.g. example.domain.com
@@ -67,7 +69,7 @@ def validate_config_content(configuration):
     except:
         raise AssertionError("Configuration must be a JSON object.")
 
-    assert len(configuration) == len(CONFIG_ATTRIBUTES), "Configuration mustn't contain more attributes than the required ones."
+    assert len(configuration) == len(CONFIG_ATTRIBUTES), "Configuration must contain exactly the required attributes."
 
     for attribute in CONFIG_ATTRIBUTES:
         assert attribute in configuration, "Required attribute not found in the request content."
@@ -102,23 +104,39 @@ def validate_action_content(content):
     return content
 
 
-def validate_round_trip_query_params(params):
+def validate_conf_file(conf_file_path):
     """
-    A utility function to validate a dictionary of parameters.
+    Validates the aggregator's configuration file - checks for existence of the file path, whether it can be parsed as a configuration file, and
+    whether it contains the required configuration attributes.
 
-    :param params: the params dict to validate
-    :return: the validated parameters dictionary
-    :raise AssertionError: if the argument is not a valid json content
-   """
+    :param conf_file_path: the configuration file path to check
+
+    :return: the parsed configuration if valid, None otherwise
+    """
+
+    global AGGREGATOR_CONFIG_SECTION, CONFIG_ATTRIBUTES
+
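+    # For illustration, a file of the following (assumed) format would pass this validation,
+    # using the values from the test setup:
+    #
+    # [AGGREGATOR]
+    # aggregator_report_period = 5
+    # aggregator_database_name = CLMCMetrics
+    # aggregator_database_url = http://172.40.231.51:8086
+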
+    conf = ConfigParser()
+    result = conf.read(conf_file_path)
 
-    global ROUND_TRIP_ATTRIBUTES
+    # ConfigParser.read() returns the list of file paths that were successfully parsed;
+    # an empty result means the configuration file cannot be parsed for some reason (doesn't exist, cannot be opened, invalid, etc.)
+    if len(result) == 0:
+        return None
 
-    assert len(params) == len(ROUND_TRIP_ATTRIBUTES), "Content mustn't contain more attributes than the required ones."
+    if AGGREGATOR_CONFIG_SECTION not in conf.sections():
+        return None  # the config should include a section called AGGREGATOR
 
-    for attribute in ROUND_TRIP_ATTRIBUTES:
-        assert attribute in params, "Required attribute not found in the request content."
+    for key in CONFIG_ATTRIBUTES:
+        if key not in conf[AGGREGATOR_CONFIG_SECTION]:
+            return None  # the configuration must include each configuration attribute
+
+    try:
+        int(conf[AGGREGATOR_CONFIG_SECTION]['aggregator_report_period'])
+    except ValueError:
+        return None  # the configuration must contain a valid integer for the aggregator's report period
 
-    return params
+    return conf
 
 
 def generate_e2e_delay_report(path_id, source_sfr, target_sfr, endpoint, sf_instance, delay_forward, delay_reverse, delay_service, avg_request_size, avg_response_size, avg_bandwidth, time):
diff --git a/src/service/clmcservice/aggregationapi/views.py b/src/service/clmcservice/aggregationapi/views.py
new file mode 100644
index 0000000000000000000000000000000000000000..154e3511de170ff0de2acbd885c9d2bcc8a338c7
--- /dev/null
+++ b/src/service/clmcservice/aggregationapi/views.py
@@ -0,0 +1,252 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          15-05-2018
+//      Created for Project :   FLAME
+"""
+
+from pyramid.view import view_defaults, view_config
+from pyramid.httpexceptions import HTTPBadRequest
+from subprocess import Popen
+from clmcservice.aggregationapi.utilities import validate_config_content, validate_action_content, \
+    CONF_OBJECT, CONF_FILE_ATTRIBUTE, AGGREGATOR_CONFIG_SECTION, CONFIG_ATTRIBUTES, RUNNING_FLAG, PROCESS_ATTRIBUTE, MALFORMED_FLAG, COMMENT_ATTRIBUTE, COMMENT_VALUE
+import os
+import os.path
+import sys
+import logging
+import configparser
+
+
+log = logging.getLogger('service_logger')
+
+
+@view_defaults(route_name='aggregator_config', renderer='json')
+class AggregatorConfig(object):
+    """
+    A class-based view for accessing and mutating the configuration of the aggregator.
+    """
+
+    def __init__(self, request):
+        """
+        Initialises the instance of the view with the request argument.
+
+        :param request: client's call request
+        """
+
+        self.request = request
+
+    @view_config(request_method="GET")
+    def get(self):
+        """
+        A GET API call for the configuration of the aggregator.
+
+        :return: A JSON response with the configuration of the aggregator.
+        """
+
+        aggregator_config_data = self.request.registry.settings[CONF_OBJECT]  # fetch the configuration object
+        if aggregator_config_data is None:
+            raise HTTPBadRequest("Aggregator has not been configured, yet. Send a PUT request to /aggregator/config with a JSON body of the configuration.")
+
+        config = {key: aggregator_config_data[AGGREGATOR_CONFIG_SECTION][key] for key in CONFIG_ATTRIBUTES}  # extract a json value containing the config attributes
+        config['aggregator_report_period'] = int(config['aggregator_report_period'])
+
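+        # e.g. with the initial test configuration the returned JSON would be:
+        # {"aggregator_report_period": 5, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://172.40.231.51:8086"}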
+        return config
+
+    @view_config(request_method="PUT")
+    def put(self):
+        """
+        A PUT API call for updating the configuration of the aggregator.
+
+        :return: A JSON response to the PUT call - the new configuration data, plus a comment on the aggregator's state if it has become malformed
+        :raises HTTPBadRequest: if the request body is not a valid JSON configuration
+        """
+
+        try:
+            new_config = self.request.body.decode(self.request.charset)
+            new_config = validate_config_content(new_config)  # validate the content and receive a json dictionary object
+        except AssertionError as e:
+            raise HTTPBadRequest("Bad request content. Configuration format is incorrect: {0}".format(e.args))
+
+        conf = self.request.registry.settings[CONF_OBJECT]
+        if conf is None:
+            conf = configparser.ConfigParser()
+            conf[AGGREGATOR_CONFIG_SECTION] = {}
+            self.request.registry.settings[CONF_OBJECT] = conf
+            old_config = {}
+        else:
+            # save the old configuration before updating so that it can be compared to the new one and checked for malformed state
+            old_config = {attribute: conf[AGGREGATOR_CONFIG_SECTION][attribute] for attribute in CONFIG_ATTRIBUTES}
+            old_config['aggregator_report_period'] = int(old_config['aggregator_report_period'])
+
+        for attribute in CONFIG_ATTRIBUTES:
+            conf[AGGREGATOR_CONFIG_SECTION][attribute] = str(new_config.get(attribute))  # update the configuration attributes
+
+        # if configuration is not already malformed, check whether the configuration is updated (changed in any way), if so (and the aggregator is running), malformed state is detected
+        if not self.request.registry.settings[MALFORMED_FLAG]:
+            malformed = old_config != new_config and AggregatorController.is_process_running(self.request.registry.settings.get(PROCESS_ATTRIBUTE))
+            self.request.registry.settings[MALFORMED_FLAG] = malformed
+            if malformed:
+                new_config[MALFORMED_FLAG] = True
+                new_config[COMMENT_ATTRIBUTE] = COMMENT_VALUE
+
+        self._write_conf_file()  # save the updated configuration to conf file
+        return new_config
+
+    def _write_conf_file(self):
+        """
+        Writes the configuration settings of the aggregator to a file with path stored at CONF_FILE_ATTRIBUTE
+        """
+
+        conf = self.request.registry.settings[CONF_OBJECT]
+        conf_file_path = self.request.registry.settings[CONF_FILE_ATTRIBUTE]
+        os.makedirs(os.path.dirname(conf_file_path), exist_ok=True)
+
+        log.info("Saving configuration to file {0}.".format(conf_file_path))
+        with open(conf_file_path, 'w') as configfile:
+            log.info("Opened configuration file {0}.".format(conf_file_path))
+            conf.write(configfile)
+        log.info("Successfully saved configuration to file {0}.".format(conf_file_path))
+
+
+@view_defaults(route_name='aggregator_controller', renderer='json')
+class AggregatorController(object):
+
+    """
+    A class-based view for controlling the aggregator.
+    """
+
+    def __init__(self, request):
+        """
+        Initialises the instance of the view with the request argument.
+
+        :param request: client's call request
+        """
+
+        self.request = request
+
+    @view_config(request_method="GET")
+    def get(self):
+        """
+        A GET API call for the status of the aggregator - running or not.
+
+        :return: A JSON response with the status of the aggregator.
+        """
+
+        aggregator_data = self.request.registry.settings
+        aggregator_process = aggregator_data.get(PROCESS_ATTRIBUTE)
+        aggregator_running = self.is_process_running(aggregator_process)
+
+        config = {RUNNING_FLAG: aggregator_running}
+
+        if aggregator_data[MALFORMED_FLAG] and aggregator_running:
+            config[MALFORMED_FLAG] = True
+            config[COMMENT_ATTRIBUTE] = COMMENT_VALUE
+
+        return config
+
+    @view_config(request_method="PUT")
+    def put(self):
+        """
+        A PUT API call for the status of the aggregator.
+
+        :return: A JSON response to the PUT call, indicating whether the aggregator is running or not
+        :raises HTTPBadRequest: if request body is not a valid JSON for the controller
+        """
+
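+        # the expected request body, as exercised by the tests, is a JSON object of the form
+        # {"action": "start"}, {"action": "stop"} or {"action": "restart"}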
+        content = self.request.body.decode(self.request.charset)
+
+        try:
+            content = validate_action_content(content)
+
+            conf = self.request.registry.settings[CONF_OBJECT]
+            if conf is None:
+                raise HTTPBadRequest("You must configure the aggregator before controlling it. Send a PUT request to /aggregator/config with a JSON body of the configuration.")
+
+            aggregator_config = {attribute: conf[AGGREGATOR_CONFIG_SECTION][attribute] for attribute in CONFIG_ATTRIBUTES}
+            aggregator_config['aggregator_report_period'] = int(aggregator_config['aggregator_report_period'])
+
+            action = content['action']
+
+            aggregator_running = self.is_process_running(self.request.registry.settings.get(PROCESS_ATTRIBUTE))
+            if action == 'start':
+                if not aggregator_running:
+                    process = self.start_aggregator(aggregator_config)
+                    aggregator_running = True
+                    self.request.registry.settings[PROCESS_ATTRIBUTE] = process
+                    self.request.registry.settings[MALFORMED_FLAG] = False
+            elif action == 'stop':
+                self.stop_aggregator(self.request.registry.settings.get(PROCESS_ATTRIBUTE))
+                aggregator_running = False
+                self.request.registry.settings[PROCESS_ATTRIBUTE] = None
+                self.request.registry.settings[MALFORMED_FLAG] = False
+            elif action == 'restart':
+                self.stop_aggregator(self.request.registry.settings.get(PROCESS_ATTRIBUTE))
+                process = self.start_aggregator(aggregator_config)
+                aggregator_running = True
+                self.request.registry.settings[PROCESS_ATTRIBUTE] = process
+                self.request.registry.settings[MALFORMED_FLAG] = False
+
+            return {RUNNING_FLAG: aggregator_running}
+
+        except AssertionError:
+            raise HTTPBadRequest('Bad request content - must be in JSON format: {"action": value}, where value is "start", "stop" or "restart".')
+
+    @staticmethod
+    def start_aggregator(config):
+        """
+        An auxiliary method to start the aggregator.
+
+        :param config: the configuration containing the arguments for the aggregator
+        :return: the process object of the started aggregator script
+        """
+
+        python_interpreter = sys.executable
+        command = [python_interpreter, '-m', 'clmcservice.aggregation.aggregator', '--period', str(config.get('aggregator_report_period')), '--database',
+                   config.get('aggregator_database_name'), '--url', config.get('aggregator_database_url')]
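+        # e.g. with the configuration used in the tests this resolves to something like
+        # (the interpreter path depends on the environment):
+        #   python3 -m clmcservice.aggregation.aggregator --period 5 --database CLMCMetrics --url http://172.40.231.51:8086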
+        process = Popen(command)
+
+        log.info("\nStarted aggregator process with PID: {0}\n".format(process.pid))
+
+        return process
+
+    @staticmethod
+    def stop_aggregator(process):
+        """
+        An auxiliary method to stop the aggregator.
+
+        :param process: the process to terminate
+        """
+
+        # check if the process is started
+        if AggregatorController.is_process_running(process):
+            process.terminate()
+            log.info("\nStopped aggregator process with PID: {0}\n".format(process.pid))
+
+    @staticmethod
+    def is_process_running(process):
+        """
+        Checks if a process is running.
+
+        :param process: the Popen object to check
+        :return: True if running, False otherwise
+        """
+
+        # process.poll() returns None while the process is running and the exit code once it has terminated, hence the check for a None value
+        return process is not None and process.poll() is None
diff --git a/src/service/clmcservice/configapi/__init__.py b/src/service/clmcservice/configapi/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/src/service/clmcservice/configapi/__init__.py
@@ -0,0 +1 @@
+
diff --git a/src/service/clmcservice/configapi/conftest.py b/src/service/clmcservice/configapi/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..105ebe81d84e5bead8e73dd2d069026c58e20ee7
--- /dev/null
+++ b/src/service/clmcservice/configapi/conftest.py
@@ -0,0 +1,40 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          03-07-2018
+//      Created for Project :   FLAME
+"""
+
+
+import pytest
+from clmcservice.whoamiapi.conftest import create_test_database, initialise_database, drop_test_database
+
+
+@pytest.fixture(scope='module', autouse=True)
+def testing_db_session():
+    test_database = "configtestdb"
+    create_test_database(test_database)  # create a database used for executing the unit tests
+    db_session, engine = initialise_database(test_database)  # initialise the database with the models and retrieve a db session
+
+    yield db_session  # return the db session if needed in any of the tests
+
+    db_session.remove()  # remove the db session
+    engine.dispose()  # dispose from the engine
+    drop_test_database(test_database)  # remove the test database
diff --git a/src/service/clmcservice/configapi/tests.py b/src/service/clmcservice/configapi/tests.py
new file mode 100644
index 0000000000000000000000000000000000000000..39ee0ed900bd66f702960457aee532db404c3a89
--- /dev/null
+++ b/src/service/clmcservice/configapi/tests.py
@@ -0,0 +1,315 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          02-07-2018
+//      Created for Project :   FLAME
+"""
+
+import pytest
+from json import dumps
+from pyramid import testing
+from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound, HTTPConflict
+from clmcservice.models import ServiceFunctionChain
+from clmcservice.configapi.views import SFCConfigAPI
+
+
+class TestSFCConfigAPI(object):
+    """
+    A pytest test suite for the Config API endpoints for service function chains.
+    """
+
+    @pytest.fixture(autouse=True)
+    def app_config(self):
+        """
+        A fixture implementing setUp/tearDown functionality for all tests by initialising the configuration structure for the web service and the database connection
+        """
+
+        self.registry = testing.setUp()
+
+        yield
+
+        testing.tearDown()
+        ServiceFunctionChain.delete_all()  # clear the instances of the model in the test database
+
+    def test_get_all(self):
+        """
+        Tests the GET all method of the config API for service function chains - returns a list of all service function chains from the database.
+        """
+
+        request = testing.DummyRequest()
+        response = SFCConfigAPI(request).get_all()
+        assert response == [], "Initially there mustn't be any service function chains in the database."
+
+        sfc = ServiceFunctionChain(sfc="sfc1", chain={"nginx": ["minio"]})
+        expected_response_data = [sfc.json]
+        ServiceFunctionChain.add(sfc)  # adds the new instance of the model to the database
+
+        request = testing.DummyRequest()
+        response = SFCConfigAPI(request).get_all()
+        assert response == expected_response_data, "Incorrect response data with 1 service function chain."
+
+        sfc = ServiceFunctionChain(sfc="sfc2", chain={"nginx": ["minio"]})
+        expected_response_data.append(sfc.json)
+        ServiceFunctionChain.add(sfc)
+        sfc = ServiceFunctionChain(sfc="sfc3", chain={"nginx": ["minio"]})
+        expected_response_data.append(sfc.json)
+        ServiceFunctionChain.add(sfc)
+
+        request = testing.DummyRequest()
+        response = SFCConfigAPI(request).get_all()
+        assert response == expected_response_data, "Incorrect response data with more than 1 service function chains."
+
+    def test_get_one(self):
+        """
+        Tests the GET one method of the config API for service function chains - returns a service function chain from the database.
+        """
+
+        request = testing.DummyRequest()
+        response = SFCConfigAPI(request).get_all()
+        assert response == [], "Initially there mustn't be any service function chains in the database."
+
+        self._validation_of_url_parameters_test("get_one")
+
+        sfc = ServiceFunctionChain(sfc="sfc1", chain={"nginx": ["minio"]})
+        expected_response_data = sfc.json
+        ServiceFunctionChain.add(sfc)  # adds the new instance of the model to the database
+
+        request = testing.DummyRequest()
+        request.params["sfc"] = "sfc1"
+        response = SFCConfigAPI(request).get_one()
+        assert response == expected_response_data, "Invalid data returned in the response of GET instance"
+
+        request = testing.DummyRequest()
+        request.params["sfc"] = "sfc2"
+        error_raised = False
+        try:
+            SFCConfigAPI(request).get_one()
+        except HTTPNotFound:
+            error_raised = True
+        assert error_raised, "Not found error must be raised in case of a non existing service function chain"
+
+    def test_post(self):
+        """
+        Tests the POST method of the config API for service function chains - creates a service function chain in the database.
+        """
+
+        request = testing.DummyRequest()
+        response = SFCConfigAPI(request).get_all()
+        assert response == [], "Initially there mustn't be any service function chains in the database."
+
+        resource = dict(sfc="sfc1", chain={"nginx": ["minio"]})
+        json_data = dumps(resource)
+        request = testing.DummyRequest()
+        request.body = json_data.encode(request.charset)
+        response = SFCConfigAPI(request).post()
+        assert response == resource, "POST request must return the created resource"
+        assert ServiceFunctionChain.exists("sfc1"), "POST request must have created the resource"
+
+        resource["chain"] = {}
+        json_data = dumps(resource)
+        request = testing.DummyRequest()
+        request.body = json_data.encode(request.charset)
+        error_raised = False
+        try:
+            SFCConfigAPI(request).post()
+        except HTTPConflict:
+            error_raised = True
+        assert error_raised, "An error must be raised when trying to create a resource which breaks the unique constraint"
+
+    @pytest.mark.parametrize("body, valid", [
+        ('{"sfc": "sfc1", "chain":{"nginx":["minio"]}}', True),
+        ('{"sfc": "sfc2", "chain":{}}', True),
+        ('{"sfc": "sfc1", "chain":[]}', False),
+        ('{}', False),
+        ('{"sfc": "sfc3"}', False),
+        ('{"sf": "sfc2", "sf_i": "sfc_i2", "chain":{}', False),
+        ('{invalid json}', False),
+    ])
+    def test_post_body_validation(self, body, valid):
+        """
+        Tests the POST request validation of the body content.
+
+        :param body: The request body to be validated
+        :param valid: True if body is valid, False otherwise
+        """
+
+        request = testing.DummyRequest()
+        request.body = body.encode(request.charset)
+        error_raised = False
+        try:
+            SFCConfigAPI(request).post()
+        except HTTPBadRequest:
+            error_raised = True
+        assert error_raised == (not valid), "An error must be raised in case of an invalid request body"
+
+    def test_put(self):
+        """
+        Tests the PUT method of the Config API for service function chains - overwrites a service function chain from the database.
+        """
+
+        request = testing.DummyRequest()
+        response = SFCConfigAPI(request).get_all()
+        assert response == [], "Initially there mustn't be any service function chains in the database."
+
+        self._validation_of_url_parameters_test("put")
+
+        resource = dict(sfc="sfc1", chain={"nginx": ["minio"]})
+        body = dumps(resource)
+        request = testing.DummyRequest()
+        request.params["sfc"] = "sfc1"
+        request.body = body.encode(request.charset)
+        error_raised = False
+        try:
+            SFCConfigAPI(request).put()
+        except HTTPNotFound:
+            error_raised = True
+        assert error_raised, "Not found error must be raised in case of a non existing service function chain"
+
+        sfc = ServiceFunctionChain(sfc="sfc1", chain={"nginx": ["minio"]})
+        ServiceFunctionChain.add(sfc)  # adds the new instance of the model to the database
+
+        resource = dict(sfc="sfc1", chain={})
+        body = dumps(resource)
+        request = testing.DummyRequest()
+        request.params["sfc"] = "sfc1"
+        request.body = body.encode(request.charset)
+        response = SFCConfigAPI(request).put()
+        assert response == resource, "PUT request must return the updated resource"
+        assert ServiceFunctionChain.get("sfc1").json["chain"] == {}
+
+        resource = dict(sfc="sfc2", chain={"nginx": ["minio"]})
+        body = dumps(resource)
+        request = testing.DummyRequest()
+        request.params["sfc"] = "sfc1"
+        request.body = body.encode(request.charset)
+        response = SFCConfigAPI(request).put()
+        assert response == resource, "PUT request must return the updated resource"
+        assert not ServiceFunctionChain.exists("sfc1"), "Resource has not been updated"
+        assert ServiceFunctionChain.exists("sfc2"), "Resource has not been updated"
+
+        sfc = ServiceFunctionChain(sfc="sfc1", chain={"nginx": ["minio"]})
+        ServiceFunctionChain.add(sfc)  # adds the new instance of the model to the database
+
+        resource = dict(sfc="sfc2", chain={"nginx": ["minio"]})
+        body = dumps(resource)
+        request = testing.DummyRequest()
+        request.params["sfc"] = "sfc1"
+        request.body = body.encode(request.charset)
+        error_raised = False
+        try:
+            SFCConfigAPI(request).put()
+        except HTTPConflict:
+            error_raised = True
+        assert error_raised, "PUT request breaks unique constraint"
+
+    @pytest.mark.parametrize("body, valid", [
+        ('{"sfc": "sfc1", "chain":{"nginx":["minio"]}}', True),
+        ('{"sfc": "sfc2", "chain":{}}', True),
+        ('{"sfc": "sfc1", "chain":[]}', False),
+        ('{}', False),
+        ('{"sfc": "sfc3"}', False),
+        ('{"sf": "sfc2", "sf_i": "sfc_i2", "chain":{}', False),
+        ('{invalid json}', False),
+    ])
+    def test_put_body_validation(self, body, valid):
+        """
+        Tests the PUT request validation of the body content.
+
+        :param body: The request body to be validated
+        :param valid: True if body is valid, False otherwise
+        """
+
+        sfc = ServiceFunctionChain(sfc="sfc1", chain={"nginx": ["minio"]})
+        ServiceFunctionChain.add(sfc)  # adds the new instance of the model to the database
+
+        request = testing.DummyRequest()
+        request.params["sfc"] = "sfc1"
+        request.body = body.encode(request.charset)
+        error_raised = False
+        try:
+            SFCConfigAPI(request).put()
+        except HTTPBadRequest:
+            error_raised = True
+        assert error_raised == (not valid), "An error must be raised in case of an invalid request body"
+
+    def test_delete(self):
+        """
+        Tests the DELETE method of the config API for service function chains - deletes a service function chain from the database.
+        """
+
+        request = testing.DummyRequest()
+        response = SFCConfigAPI(request).get_all()
+        assert response == [], "Initially there mustn't be any service function chains in the database."
+
+        self._validation_of_url_parameters_test("delete")
+
+        sfc = ServiceFunctionChain(sfc="sfc1", chain={"nginx": ["minio"]})
+        to_delete = sfc.json
+        ServiceFunctionChain.add(sfc)  # adds the new instance of the model to the database
+
+        assert ServiceFunctionChain.exists("sfc1")
+
+        request = testing.DummyRequest()
+        request.params["sfc"] = "sfc1"
+        response = SFCConfigAPI(request).delete()
+        assert response == to_delete, "DELETE must return the deleted object if successful"
+
+        assert not ServiceFunctionChain.exists("sfc1"), "Resource must be deleted after the delete API method has been called."
+
+        request = testing.DummyRequest()
+        request.params["sfc"] = "sfc1"
+        error_raised = False
+        try:
+            SFCConfigAPI(request).delete()
+        except HTTPNotFound:
+            error_raised = True
+        assert error_raised, "Not found error must be raised in case of a non existing service function chain"
+
+    @staticmethod
+    def _validation_of_url_parameters_test(method):
+        """
+        Validates the way a config API method handles url query parameters for service function chains
+
+        :param method: the method to test
+        """
+
+        request = testing.DummyRequest()
+        error_raised = False
+        try:
+            getattr(SFCConfigAPI(request), method)()
+        except HTTPBadRequest:
+            error_raised = True
+        assert error_raised, "Error must be raised in case of no URL parameters"
+
+        request = testing.DummyRequest()
+        request.params["sfc_i"] = "sfc1"  # argument should be sfc
+        try:
+            getattr(SFCConfigAPI(request), method)()
+        except HTTPBadRequest:
+            error_raised = True
+        assert error_raised, "Error must be raised in case of insufficient number of arguments"
+
+        request = testing.DummyRequest()
+        request.params["sf"] = "sfc1"  # argument should be sfc
+        try:
+            getattr(SFCConfigAPI(request), method).__call__()
+        except HTTPBadRequest:
+            error_raised = True
+        assert error_raised, "Error must be raised in case of invalid naming of arguments"
diff --git a/src/service/clmcservice/configapi/utilities.py b/src/service/clmcservice/configapi/utilities.py
new file mode 100644
index 0000000000000000000000000000000000000000..0605c0599c5b1a9224001cfa603f3d7fd9a01c9b
--- /dev/null
+++ b/src/service/clmcservice/configapi/utilities.py
@@ -0,0 +1,54 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          02-07-2018
+//      Created for Project :   FLAME
+"""
+
+from json import loads
+from clmcservice.models import ServiceFunctionChain
+
+
+def validate_sfchain_body(body):
+    """
+    Validates the request body used to create a service function chain resource in the database.
+
+    :param body: the request body to validate
+    :return: the validated sfc dictionary object
+    :raise AssertionError: if the body is not a valid service function chain
+    """
+
+    try:
+        body = loads(body)
+    except (ValueError, TypeError):
+        raise AssertionError("Service function chain must be represented by a JSON object.")
+
+    assert len(body) == len(ServiceFunctionChain.__table__.columns), "Service function chain JSON object must contain exactly the required attributes - no more and no fewer."
+
+    # validate that all required attributes are given in the body
+    for attribute in ServiceFunctionChain.__table__.columns:
+        assert attribute.name in body, "Required attribute not found in the request content."
+
+    assert type(body["chain"]) == dict, "The chain attribute of a service function chain must be a graph representing the relations between service functions."
+
+    for sf in body["chain"]:
+        assert type(body["chain"][sf]) == list, "A list must be used to represent each dependency between service functions"
+
+    return body
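+
+
+# Illustrative usage (a sketch, not part of the module API; the example bodies are assumptions):
+#
+#   body = validate_sfchain_body('{"sfc": "sfc1", "chain": {"nginx": ["minio"]}}')
+#   assert body["chain"] == {"nginx": ["minio"]}
+#
+#   validate_sfchain_body('{"sfc": "sfc1", "chain": []}')  # raises AssertionError - chain must be a dict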
diff --git a/src/service/clmcservice/configapi/views.py b/src/service/clmcservice/configapi/views.py
new file mode 100644
index 0000000000000000000000000000000000000000..4cfdb69b6e842aa460c4a330961c339acd7421c0
--- /dev/null
+++ b/src/service/clmcservice/configapi/views.py
@@ -0,0 +1,185 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          02-07-2018
+//      Created for Project :   FLAME
+"""
+
+
+from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPNotFound
+from pyramid.view import view_defaults, view_config
+from clmcservice.models import ServiceFunctionChain
+from clmcservice.configapi.utilities import validate_sfchain_body
+
+
+@view_defaults(renderer='json')
+class SFCConfigAPI(object):
+    """
+    A class-based view for posting and retrieving service function chain configuration data to and from the CLMC service.
+    """
+
+    def __init__(self, request):
+        """
+        Initialises the instance of the view with the request argument.
+
+        :param request: the client's request object
+        """
+
+        self.request = request
+
+    @view_config(route_name='config_sfc', request_method='GET')
+    def get_all(self):
+        """
+        GET API call for all resources.
+
+        :return: A list of all service function chains found in the database.
+        """
+
+        return [instance.json for instance in ServiceFunctionChain.query()]
+
+    @view_config(route_name='config_sfc_instance', request_method='GET')
+    def get_one(self):
+        """
+        GET API call for a single resource.
+
+        :return: One service function chain instance retrieved from the database by querying the sfc ID
+        :raises HTTPBadRequest: if the request parameters are invalid (invalid URL query string)
+        :raises HTTPNotFound: if a resource with the given parameters doesn't exist in the database
+        """
+
+        sf_chain = self._get_sf_chain_from_url_string()
+        if sf_chain is None:
+            raise HTTPNotFound("A service function chain with the given parameters doesn't exist.")
+        else:
+            return sf_chain.json
+
+    @view_config(route_name='config_sfc', request_method='POST')
+    def post(self):
+        """
+        A POST API call to create a new service function chain.
+
+        :return: A JSON response to the POST call, containing the data of the newly created resource
+        :raises HTTPBadRequest: if request body is not a valid JSON for the service function chain
+        :raises HTTPConflict: if the unique constraints are not preserved after the creation of a new instance
+        """
+
+        # create an instance of the model and add it to the database table
+        sf_chain = self._validate_and_create()
+        json_data = sf_chain.json
+        ServiceFunctionChain.add(sf_chain)
+
+        self.request.response.status = 201
+
+        return json_data
+
+    @view_config(route_name='config_sfc_instance', request_method='PUT')
+    def put(self):
+        """
+        A PUT API call to update a service function chain.
+
+        :return: A JSON response representing the updated object
+        :raises HTTPBadRequest: if the request parameters are invalid (invalid URL query string)
+        :raises HTTPNotFound: if a resource with the given parameters doesn't exist in the database
+        """
+
+        sf_chain = self._get_sf_chain_from_url_string()
+        if sf_chain is None:
+            raise HTTPNotFound("A service function chain with the given ID doesn't exist.")
+        else:
+            try:
+                body = self.request.body.decode(self.request.charset)
+                validated_body = validate_sfchain_body(body)  # validate the content and receive a json dictionary object
+            except AssertionError as e:
+                raise HTTPBadRequest("Bad request content. Service function chain format is incorrect: {0}".format(e))
+
+            new_resource = validated_body
+            old_resource = sf_chain.json
+            updating = new_resource["sfc"] == old_resource["sfc"]
+
+            if updating:
+                ServiceFunctionChain.delete(sf_chain)
+                new_sf_chain = ServiceFunctionChain(**validated_body)
+                ServiceFunctionChain.add(new_sf_chain)
+            else:
+                resource_exists = ServiceFunctionChain.exists(new_resource["sfc"])
+                if resource_exists:
+                    raise HTTPConflict("Service function chain with this data already exists.")  # error 409 in case of resource conflict
+
+                new_sf_chain = ServiceFunctionChain(**validated_body)
+                ServiceFunctionChain.replace(sf_chain, new_sf_chain)
+
+            return validated_body
+
+    @view_config(route_name='config_sfc_instance', request_method='DELETE')
+    def delete(self):
+        """
+        Deletes an instance of a service function chain in the database.
+
+        :return: The content of the object that has been deleted
+        :raises HTTPBadRequest: if the request parameters are invalid (invalid URL query string)
+        :raises HTTPNotFound: if a resource with the given parameters doesn't exist in the database
+        """
+
+        sf_chain = self._get_sf_chain_from_url_string()
+        if sf_chain is None:
+            raise HTTPNotFound("A service function chain with the given ID doesn't exist.")
+        else:
+            deleted = sf_chain.json
+            ServiceFunctionChain.delete(sf_chain)
+            return deleted
+
+    def _get_sf_chain_from_url_string(self):
+        """
+        Retrieves a service function chain from the database by validating and then using the request url parameters.
+
+        :return: An instance of a service function chain, or None if it doesn't exist
+        """
+
+        if "sfc" not in self.request.params:
+            raise HTTPBadRequest("Request format is incorrect: URL argument 'sfc' not found")
+
+        sf_chain = ServiceFunctionChain.get(sfc=self.request.params["sfc"])
+        return sf_chain
+
+    def _validate_and_create(self):
+        """
+        Validates the request body and checks if a resource with the given attributes already exists.
+
+        :return: a new instance of the model, if the resource doesn't exist
+        :raises HTTPBadRequest: if request body is not a valid JSON for the service function chain
+        :raises HTTPConflict: if the unique constraints are not preserved after the creation of a new instance
+        """
+
+        try:
+            body = self.request.body.decode(self.request.charset)
+            validated_body = validate_sfchain_body(body)  # validate the content and receive a json dictionary object
+        except AssertionError as e:
+            raise HTTPBadRequest("Bad request content. Service function chain format is incorrect: {0}".format(e))
+
+        resource = validated_body
+
+        resource_exists = ServiceFunctionChain.exists(resource["sfc"])
+        if resource_exists:
+            raise HTTPConflict("Service function chain with this data already exists.")  # error 409 in case of resource conflict
+
+        # create an instance of the model
+        sf_chain = ServiceFunctionChain(**resource)
+
+        return sf_chain
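+
+
+# Illustrative PUT semantics (a sketch of the logic above, using example IDs):
+# given an existing resource {"sfc": "sfc1", "chain": {"nginx": ["minio"]}},
+# a PUT to ?sfc=sfc1 whose body keeps "sfc": "sfc1" updates the chain in place;
+# a body with "sfc": "sfc2" renames the resource (409 if "sfc2" already exists);
+# a PUT to ?sfc=unknown returns 404.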
diff --git a/src/service/clmcservice/generate_network_measurements.py b/src/service/clmcservice/generate_network_measurements.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d1a16268052771c0675094a3872e01c05e7917a
--- /dev/null
+++ b/src/service/clmcservice/generate_network_measurements.py
@@ -0,0 +1,99 @@
+import getopt
+import sys
+from itertools import permutations
+from influxdb import InfluxDBClient
+from json import load
+from py2neo import Graph, Node, Relationship
+
+
+def report_network_measurements(influx_host, db_name, json_data, neo4j_host, neo4j_password):
+    """
+    Generates network measurements which follow the telegraf ping plugin format.
+
+    :param influx_host: influx DB host
+    :param db_name: name of database
+    :param json_data: the network configuration data
+    :param neo4j_host: the neo4j db host
+    :param neo4j_password: the neo4j db password
+    """
+
+    # declares the data to push to influx - host, url, avg_response_ms, min_response_ms, max_response_ms
+    data = tuple((link["source"], link["target"], link["avg_response_time"], link["min_response_time"], link["max_response_time"]) for link in json_data["links"])
+
+    json_body = [
+        {"measurement": "ping",
+         "tags": {"host": host, "url": url},
+         "fields": {"packets_transmitted": 10, "reply_received": 10, "packets_received": 10,
+                    "percent_reply_loss": 0, "percent_packets_loss": 0, "errors": 0, "average_response_ms": avg_ms,
+                    "minimum_response_ms": min_ms, "maximum_response_ms": max_ms, "result_code": 0},
+         "time": 1528385860 * 10**9
+         } for host, url, avg_ms, min_ms, max_ms in data
+    ]
+
+    print("Establishing connection with influx DB on {0} with database {1}".format(influx_host, db_name))
+    db_client = InfluxDBClient(host=influx_host, timeout=10, database=db_name)
+    db_client.drop_measurement("ping")  # clear data in the ping measurement from previous executions of this script
+    print("Writing network latency data to influx..\n")
+    assert db_client.write_points(json_body)  # assert the write method returns True - successful write
+
+    graph = Graph(host=neo4j_host, password=neo4j_password)
+
+    print("Building network links from the ping telegraf plugin in influx")
+    compute_nodes = set([host for host, url, avg_ms, min_ms, max_ms in data])
+    # retrieve all network latencies available from the influx ping table
+    for network_link in permutations(compute_nodes, 2):
+        from_node_name, to_node_name = network_link
+        from_node = graph.nodes.match("ComputeNode", name=from_node_name).first()
+        if from_node is None:
+            from_node = Node("ComputeNode", name=from_node_name)
+            graph.create(from_node)
+
+        to_node = graph.nodes.match("ComputeNode", name=to_node_name).first()
+        if to_node is None:
+            to_node = Node("ComputeNode", name=to_node_name)
+            graph.create(to_node)
+
+        # query = 'SELECT mean(*) FROM "CLMCMetrics"."autogen"."ping" WHERE host=\'{0}\' and url=\'{1}\' and time>={2} and time<{3}'.format(from_node['name'], to_node['name'], from_timestamp, to_timestamp)
+        # In future when latencies are reported continuously, we should put timestamp filtering in the query for network links
+        query = 'SELECT mean(*) FROM "CLMCMetrics"."autogen"."ping" WHERE host=\'{0}\' and url=\'{1}\''.format(from_node['name'], to_node['name'])
+        print("Executing query: {0}".format(query))
+
+        result = db_client.query(query)  # execute the query
+        # get the dictionary of result points; the next() function just gets the first element of the query results generator (we only expect one item in the generator)
+        try:
+            actual_result = next(result.get_points())
+            latency = actual_result.get("mean_average_response_ms")/2
+            if graph.relationships.match(nodes=(from_node, to_node), r_type="linkedTo").first() is None:
+                edge = Relationship(from_node, "linkedTo", to_node, latency=latency)
+                graph.create(edge)
+        except StopIteration:
+            # in this case there is no such link reported to Influx
+            print("There is no direct link between {0} and {1}".format(from_node, to_node))
+
+
+if __name__ == "__main__":
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], "h:d:p:", ['host=', 'database=', 'path='])
+    except getopt.GetoptError:
+        print('generate_network_measurements.py -h <influx host> -d <influx database> -p <network configuration file path>')
+        sys.exit(1)
+
+    if len(opts) != 3:
+        print('generate_network_measurements.py -h <influx host> -d <influx database> -p <network configuration file path>')
+        sys.exit(1)
+
+    db_host, database, path = None, None, None
+    # Apply options, if any
+    for opt, arg in opts:
+        if opt in ('-h', '--host'):
+            db_host = arg
+        elif opt in ('-d', '--database'):
+            database = arg
+        elif opt in ('-p', '--path'):
+            path = arg
+
+    if all([db_host is not None, database is not None, path is not None]):
+        with open(path) as fh:
+            json_data = load(fh)
+
+        # assumes neo4j runs on the same host as influx, with password "admin"
+        report_network_measurements(db_host, database, json_data, db_host, "admin")
+    else:
+        print('generate_network_measurements.py -h <influx host> -d <influx database> -p <network configuration file path>')
+        sys.exit(1)
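+
+# Example invocation (host, database and file path are illustrative):
+#   python3 generate_network_measurements.py -h localhost -d CLMCMetrics -p network_config.json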
diff --git a/src/service/clmcservice/graphapi/__init__.py b/src/service/clmcservice/graphapi/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/service/clmcservice/graphapi/conftest.py b/src/service/clmcservice/graphapi/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..e36ac06f72af6e96dc17dcb2ec5cee5efb611aee
--- /dev/null
+++ b/src/service/clmcservice/graphapi/conftest.py
@@ -0,0 +1,219 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          09-07-2018
+//      Created for Project :   FLAME
+"""
+
+import pytest
+from influxdb import InfluxDBClient
+from clmcservice.generate_network_measurements import report_network_measurements
+from py2neo import Graph
+
+
+# static network configuration data used for testing cases
+network_config = {
+    "bandwidth": 104857600,
+    "links": [
+        {
+            "source": "DC1",
+            "target": "DC2",
+            "min_response_time": 10,
+            "max_response_time": 20,
+            "avg_response_time": 15
+        },
+        {
+            "source": "DC2",
+            "target": "DC1",
+            "min_response_time": 16,
+            "max_response_time": 28,
+            "avg_response_time": 22
+        },
+        {
+            "source": "DC1",
+            "target": "DC3",
+            "min_response_time": 17,
+            "max_response_time": 19,
+            "avg_response_time": 18
+        },
+        {
+            "source": "DC3",
+            "target": "DC1",
+            "min_response_time": 15,
+            "max_response_time": 25,
+            "avg_response_time": 20
+        },
+        {
+            "source": "DC1",
+            "target": "DC5",
+            "min_response_time": 27,
+            "max_response_time": 33,
+            "avg_response_time": 30
+        },
+        {
+            "source": "DC5",
+            "target": "DC1",
+            "min_response_time": 10,
+            "max_response_time": 42,
+            "avg_response_time": 26
+        },
+        {
+            "source": "DC2",
+            "target": "DC4",
+            "min_response_time": 11,
+            "max_response_time": 29,
+            "avg_response_time": 20
+        },
+        {
+            "source": "DC4",
+            "target": "DC2",
+            "min_response_time": 12,
+            "max_response_time": 40,
+            "avg_response_time": 26
+        },
+        {
+            "source": "DC3",
+            "target": "DC4",
+            "min_response_time": 23,
+            "max_response_time": 27,
+            "avg_response_time": 25
+        },
+        {
+            "source": "DC4",
+            "target": "DC3",
+            "min_response_time": 12,
+            "max_response_time": 18,
+            "avg_response_time": 15
+        },
+        {
+            "source": "DC5",
+            "target": "DC6",
+            "min_response_time": 3,
+            "max_response_time": 15,
+            "avg_response_time": 9
+        },
+        {
+            "source": "DC6",
+            "target": "DC5",
+            "min_response_time": 11,
+            "max_response_time": 11,
+            "avg_response_time": 11
+        },
+    ]
+}
+
+
+@pytest.fixture(scope='module', autouse=True)
+def db_testing_data():
+    """
+    This fixture generates testing data in influx to be used in the various test methods, after which it clears up the neo4j and influx databases.
+
+    :return: a 4-tuple - the from and to timestamps bounding the generated influx test data, the test database name and the graph db client object
+    """
+
+    test_db_name = "TestInfluxDB"
+
+    # ASSUMES both Influx and Neo4j are running on localhost with default ports
+    influx = InfluxDBClient(host="localhost", port=8086, timeout=10)
+    graph = Graph(host="localhost", password="admin")
+    graph.delete_all()  # clear the graph db before testing
+
+    # create the physical infrastructure subgraph
+    db_names = [db["name"] for db in influx.get_list_database()]  # get_list_database returns a list of {"name": ...} dictionaries
+    if "CLMCMetrics" not in db_names:
+        influx.create_database("CLMCMetrics")
+    report_network_measurements("localhost", "CLMCMetrics", network_config, "localhost", "admin")
+
+    # check if the test DB exists (if so, drop it), then create it anew in influx
+    if test_db_name in db_names:
+        influx.drop_database(test_db_name)
+    influx.create_database(test_db_name)
+    influx.switch_database(test_db_name)
+
+    # time range for which the data is reported
+    from_timestamp = 1528385860
+    to_timestamp = 1528685860
+
+    # nginx data to report to influx
+    data = [
+        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 5, 20, 1500, 15000, 1528385860),
+        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 8, 35, 1000, 11000, 1528385860),
+        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 7, 15, 2300, 10000, 1528389860),
+        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 10, 23, 98000, 1200, 1528389860),
+        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 12, 17, 2000, 7500, 1528395860),
+        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 15, 11, 1300, 6700, 1528395860),
+        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 17, 23, 3000, 8300, 1528485860),
+        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 19, 24, 76000, 1200, 1528485860),
+        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 11, 16, 2500, 7500, 1528545860),
+        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 20, 18, 1700, 12000, 1528545860)
+    ]
+    influx.write_points([
+        {"measurement": "nginx",
+         "tags": {"host": host, "ipendpoint": endpoint, "location": location, "sf": sf, "sf_i": sf_i, "sfc": sfc, "sfc_i": sfc_i, "sr": sr},
+         "fields": {"requests": num_requests, "avg_processing_time": processing_time, "avg_request_size": request_size, "avg_response_size": response_size},
+         "time": timestamp * 10 ** 9
+         } for host, endpoint, location, sf, sf_i, sfc, sfc_i, sr, num_requests, processing_time, request_size, response_size, timestamp in data
+    ])
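+    # sanity check of the data above (the derivation the graph tests rely on), e.g. for nginx_1_ep1:
+    # mean(avg_processing_time) = (20+15+17+23+16)/5 = 18.2, the response_time the build test expects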
+
+    # minio data to report to influx
+    data = [
+        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 12, 86, 101000, 4700, 1528386860),
+        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 15, 75, 96000, 6300, 1528386860),
+        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 7, 105, 5200, 89200, 1528388860),
+        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 12, 60, 76900, 2100, 1528388860),
+        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 11, 121, 99500, 3500, 1528410860),
+        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 12, 154, 2700, 111000, 1528410860),
+        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 14, 84, 1100, 4300, 1528412860),
+        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 5, 45, 1200, 3200, 1528412860),
+        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 7, 63, 87000, 2000, 1528414860),
+        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 16, 86, 3100, 94000, 1528414860)
+    ]
+    influx.write_points([
+        {"measurement": "minio_http",
+         "tags": {"host": host, "ipendpoint": endpoint, "location": location, "sf": sf, "sf_i": sf_i, "sfc": sfc, "sfc_i": sfc_i, "sr": sr},
+         "fields": {"total_requests_count": num_requests, "total_processing_time": processing_time, "total_requests_size": request_size, "total_response_size": response_size},
+         "time": timestamp * 10 ** 9
+         } for host, endpoint, location, sf, sf_i, sfc, sfc_i, sr, num_requests, processing_time, request_size, response_size, timestamp in data
+    ])
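+    # for minio the response time is a ratio of means, e.g. for minio_1_ep1:
+    # mean(total_processing_time)/mean(total_requests_count) = 91.8/10.2 = 9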
+
+    # apache data to report to influx
+    data = [
+        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 15, 1400, 15600, 1528386860),
+        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 17, 2200, 11200, 1528388860),
+        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 19, 700, 5700, 1528410860),
+        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 24, 1900, 4300, 1528412860),
+        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 13, 1200, 2500, 1528414860),
+    ]
+    influx.write_points([
+        {"measurement": "apache",
+         "tags": {"host": host, "ipendpoint": endpoint, "location": location, "sf": sf, "sf_i": sf_i, "sfc": sfc, "sfc_i": sfc_i, "sr": sr},
+         "fields": {"avg_processing_time": processing_time, "avg_request_size": request_size, "avg_response_size": response_size},
+         "time": timestamp * 10 ** 9
+         } for host, endpoint, location, sf, sf_i, sfc, sfc_i, sr, processing_time, request_size, response_size, timestamp in data
+    ])
+
+    yield from_timestamp, to_timestamp, test_db_name, graph
+
+    # clean up after the test is over - delete the test databases and clear up the graph
+    influx.drop_database("CLMCMetrics")
+    influx.drop_database("TestInfluxDB")
+    graph.delete_all()
diff --git a/src/service/clmcservice/graphapi/tests.py b/src/service/clmcservice/graphapi/tests.py
new file mode 100644
index 0000000000000000000000000000000000000000..9dcd23eaa93819f0743dcab77756969ac53d9fe5
--- /dev/null
+++ b/src/service/clmcservice/graphapi/tests.py
@@ -0,0 +1,457 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          09-07-2018
+//      Created for Project :   FLAME
+"""
+
+from json import dumps
+import pytest
+from pyramid import testing
+from clmcservice.graphapi.views import GraphAPI
+from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound
+
+
+graph_1_id = None
+graph_2_id = None
+
+
+class TestGraphAPI(object):
+    """
+    A pytest test suite for the Graph API endpoints.
+    """
+
+    @pytest.fixture(autouse=True)
+    def app_config(self):
+        """
+        A fixture to implement setUp/tearDown functionality for all tests by initializing the configuration structure for the web service and the db connection
+        """
+
+        self.config = testing.setUp()
+        self.config.add_settings({"neo4j_host": "localhost", "neo4j_password": "admin", "influx_host": "localhost", "influx_port": 8086, "network_bandwidth": 104857600})
+
+        yield
+
+        testing.tearDown()
+
+    @pytest.mark.parametrize("body, from_timestamp, to_timestamp, error_msg", [
+        (None, None, None, "A bad request error must have been raised in case of missing request body."),
+        ('{}', 12341412, 1234897, "A bad request error must have been raised in case of invalid request body."),
+        ('{"database": "CLMCMetrics", "retention_policy": "autogen", "service_function_chain_instance": "sfc_i"}', 12341412, 1234897, "A bad request error must have been raised in case of invalid request body."),
+        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": "{invalid_json}"}', 1528386860, 1528389860, "A bad request error must have been raised in case of invalid request body."),
+        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": ["nginx", "minio"]}', 1528386860, 1528389860, "A bad request error must have been raised in case of invalid request body."),
+        ('{"retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         1528386860, 1528389860, "A bad request error must have been raised in case of missing database value in the request body"),
+        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_id", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         1528386860, 1528389860, "A bad request error must have been raised in case of invalid sfc_i ID in the request body"),
+        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "testsfc1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         1528386860, 1528389860, "A bad request error must have been raised in case of invalid sfc_i ID in the request body"),
+        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         "not a timestamp", "not a timestamp", "A bad request error must have been raised in case of invalid URL parameters."),
+        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         None, "not a timestamp", "A bad request error must have been raised in case of invalid URL parameters."),
+        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         2131212, None, "A bad request error must have been raised in case of invalid URL parameters."),
+        ('{"database": "DB-not-exist", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         2131212, 2131212, "A bad request error must have been raised in case of a non-existing database."),
+        ('{"database": "TestInfluxDB", "retention_policy": "autogen-invalid", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
+         2131212, 2131212, "A bad request error must have been raised in case of a non-existing retention policy."),
+    ])
+    def test_build_error_handling(self, body, from_timestamp, to_timestamp, error_msg):
+        """
+        Tests the error handling of the graph build API endpoint by passing erroneous input and confirming an HTTPBadRequest was returned.
+
+        :param body: body of the request to test
+        :param from_timestamp: the 'from' URL param
+        :param to_timestamp: the 'to' URL param
+        :param error_msg: the error message to pass in case of an error not being properly handled by the API endpoint (in other words, a test failure)
+        """
+
+        request = testing.DummyRequest()
+        if body is not None:
+            request.body = body
+        request.body = request.body.encode(request.charset)
+        if from_timestamp is not None:
+            request.params["from"] = from_timestamp
+        if to_timestamp is not None:
+            request.params["to"] = to_timestamp
+        error_raised = False
+        try:
+            GraphAPI(request).build_temporal_graph()
+        except HTTPBadRequest:
+            error_raised = True
+        assert error_raised, error_msg
+
+    def test_build(self, db_testing_data):
+        """
+        Tests the graph build API endpoint - it makes 2 API calls and checks that the expected graph was created (the influx data that's being used is reported to InfluxDB in the conftest file)
+
+        :param db_testing_data: the from and to timestamps bounding the generated influx test data, the test database name and the graph db client object (a fixture from conftest)
+        """
+
+        global graph_1_id, graph_2_id  # these variables are used to store the ID of the graphs that were created during the execution of this test method; they are reused later when testing the delete method
+
+        from_timestamp, to_timestamp, test_db_name, graph_db = db_testing_data
+
+        dc_nodes = set([node["name"] for node in graph_db.nodes.match("ComputeNode")])
+        assert dc_nodes == set("DC" + str(i) for i in range(1, 7)), "Compute nodes must have been created by the db_testing_data fixture"
+
+        # test with invalid URL parameter names
+        service_functions = dict(nginx={"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)",
+                                        "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"},
+                                 minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
+                                        "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"},
+                                 apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)",
+                                         "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"})
+        body = dumps(dict(database=test_db_name, retention_policy="autogen", service_function_chain_instance="sfc_1", service_functions=service_functions))
+        request = testing.DummyRequest()
+        request.params["from_timestamp"] = 12341412
+        request.params["to_timestamp"] = 12341412
+        request.body = body.encode(request.charset)
+        error_raised = False
+        try:
+            GraphAPI(request).build_temporal_graph()
+        except HTTPBadRequest:
+            error_raised = True
+        assert error_raised, "A bad request error must have been raised in case of invalid URL parameters."
+
+        # Create a valid build request and send it to the API endpoint
+        service_functions = dict(nginx={"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)",
+                                        "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"},
+                                 minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
+                                        "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"})
+        build_json_body = dict(database=test_db_name, retention_policy="autogen", service_function_chain_instance="test_sfc1_1", service_functions=service_functions)
+        body = dumps(build_json_body)
+        request = testing.DummyRequest()
+        request.params["from"] = from_timestamp
+        request.params["to"] = to_timestamp
+        request.body = body.encode(request.charset)
+        response = GraphAPI(request).build_temporal_graph()
+        graph_subresponse = response.pop("graph")
+        assert response == build_json_body, "Response must contain the request body"
+        assert graph_subresponse.get("uuid") is not None, "Request UUID must be attached to the response."
+        request_id = graph_subresponse["uuid"]
+        graph_1_id = request_id
+
+        # check that the appropriate nodes have been created
+        sf_names = set([node["name"] for node in graph_db.nodes.match("ServiceFunction")])
+        assert sf_names == {"nginx", "minio"}, "The graph must contain 2 service functions - nginx and minio"
+        sf_i_names = set([node["name"] for node in graph_db.nodes.match("ServiceFunctionInstance")])
+        assert sf_i_names == {"nginx_1", "minio_1"}, "The graph must contain 2 service function instances - nginx_1 and minio_1"
+        endpoints = set([node["name"] for node in graph_db.nodes.match("Endpoint", uuid=request_id)])
+        assert endpoints == {"minio_1_ep1", "nginx_1_ep1", "nginx_1_ep2"}, "The graph must contain 3 endpoints - minio_1_ep1, nginx_1_ep1, nginx_1_ep2"
+        sfc_i_names = set([node["name"] for node in graph_db.nodes.match("ServiceFunctionChainInstance")])
+        assert sfc_i_names == {"test_sfc1_1"}, "The graph must contain 1 service function chain instance - test_sfc1_1"
+        sfc_names = set([node["name"] for node in graph_db.nodes.match("ServiceFunctionChain")])
+        assert sfc_names == {"test_sfc1"}, "The graph must contain 1 service function chain - test_sfc1"
+
+        reference_node = graph_db.nodes.match("Reference", uuid=request_id, sfc_i="test_sfc1_1", sfc="test_sfc1").first()
+        assert reference_node is not None and reference_node["from"] == from_timestamp * 10**9 and reference_node["to"] == to_timestamp * 10**9, "Reference node must have been created"
+
+        # check the appropriate edges have been created
+        self.check_exist_relationship(
+            (
+                ("minio_1_ep1", "Endpoint", "DC4", "ComputeNode", "hostedBy"),
+                ("nginx_1_ep1", "Endpoint", "DC4", "ComputeNode", "hostedBy"),
+                ("nginx_1_ep2", "Endpoint", "DC6", "ComputeNode", "hostedBy"),
+                ("minio_1", "ServiceFunctionInstance", "minio_1_ep1", "Endpoint", "realisedBy"),
+                ("nginx_1", "ServiceFunctionInstance", "nginx_1_ep1", "Endpoint", "realisedBy"),
+                ("nginx_1", "ServiceFunctionInstance", "nginx_1_ep2", "Endpoint", "realisedBy"),
+                ("minio_1", "ServiceFunctionInstance", "minio", "ServiceFunction", "instanceOf"),
+                ("nginx_1", "ServiceFunctionInstance", "test_sfc1_1", "ServiceFunctionChainInstance", "utilizedBy"),
+                ("minio_1", "ServiceFunctionInstance", "test_sfc1_1", "ServiceFunctionChainInstance", "utilizedBy"),
+                ("nginx", "ServiceFunction", "test_sfc1", "ServiceFunctionChain", "utilizedBy"),
+                ("minio", "ServiceFunction", "test_sfc1", "ServiceFunctionChain", "utilizedBy"),
+                ("test_sfc1_1", "ServiceFunctionChainInstance", "test_sfc1", "ServiceFunctionChain", "instanceOf"),
+            ), graph_db, request_id
+        )
+
+        # check endpoint nodes have the correct properties
+        for endpoint, response_time, request_size, response_size in (("minio_1_ep1", 9, 5760, 2033), ("nginx_1_ep1", 18.2, 2260, 9660), ("nginx_1_ep2", 22.2, 35600, 6420)):
+            endpoint_node = graph_db.nodes.match("Endpoint", name=endpoint, uuid=request_id).first()
+            assert endpoint_node["response_time"] == response_time, "Wrong response time property of endpoint node"
+            # approximation is used to avoid comparing long floats retrieved from influx; the test data ensures the expected values differ enough that a +-1 absolute tolerance is sufficient
+            assert endpoint_node["request_size"] == pytest.approx(request_size, abs=1), "Wrong request size attribute of endpoint node"
+            assert endpoint_node["response_size"] == pytest.approx(response_size, abs=1), "Wrong response size attribute of endpoint node"
+
+        # send a new request for a new service function chain and check the new subgraph has been created
+        service_functions = dict(minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
+                                        "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"},
+                                 apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)",
+                                         "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"})
+        build_json_body = dict(database=test_db_name, retention_policy="autogen", service_function_chain_instance="test_sfc2_1", service_functions=service_functions)
+        body = dumps(build_json_body)
+        request = testing.DummyRequest()
+        request.params["from"] = from_timestamp
+        request.params["to"] = to_timestamp
+        request.body = body.encode(request.charset)
+        response = GraphAPI(request).build_temporal_graph()
+        graph_subresponse = response.pop("graph")
+        assert response == build_json_body, "Response must contain the request body"
+        assert graph_subresponse.get("uuid") is not None, "Request UUID must be attached to the response."
+        request_id = graph_subresponse["uuid"]
+        graph_2_id = request_id
+
+        # check the new nodes have been created
+        assert graph_db.nodes.match("ServiceFunction", name="apache").first() is not None, "Service function apache must have been added to the graph"
+
+        for sf_i in ("apache_1", "minio_2"):
+            assert graph_db.nodes.match("ServiceFunctionInstance", name=sf_i).first() is not None, "Service function instance {0} must have been added to the graph".format(sf_i)
+
+        for ep in ("minio_2_ep1", "apache_1_ep1"):
+            assert graph_db.nodes.match("Endpoint", name=ep, uuid=request_id).first() is not None, "Endpoint {0} must have been added to the graph".format(ep)
+
+        assert graph_db.nodes.match("ServiceFunctionChainInstance", name="test_sfc2_1").first() is not None, "Service function chain instance test_sfc2_1 must have been added to the graph"
+        assert graph_db.nodes.match("ServiceFunctionChain", name="test_sfc2").first() is not None, "Service function chain test_sfc2 must have been added to the graph"
+
+        reference_node = graph_db.nodes.match("Reference", uuid=request_id, sfc_i="test_sfc2_1", sfc="test_sfc2").first()
+        assert reference_node is not None and reference_node["from"] == from_timestamp * 10**9 and reference_node["to"] == to_timestamp * 10**9, "Reference node must have been created"
+
+        # check the appropriate edges have been created
+        self.check_exist_relationship(
+            (
+                ("minio_2_ep1", "Endpoint", "DC5", "ComputeNode", "hostedBy"),
+                ("apache_1_ep1", "Endpoint", "DC5", "ComputeNode", "hostedBy"),
+                ("minio_2", "ServiceFunctionInstance", "minio_2_ep1", "Endpoint", "realisedBy"),
+                ("apache_1", "ServiceFunctionInstance", "apache_1_ep1", "Endpoint", "realisedBy"),
+                ("minio_2", "ServiceFunctionInstance", "minio", "ServiceFunction", "instanceOf"),
+                ("apache_1", "ServiceFunctionInstance", "apache", "ServiceFunction", "instanceOf"),
+                ("minio_2", "ServiceFunctionInstance", "test_sfc2_1", "ServiceFunctionChainInstance", "utilizedBy"),
+                ("apache_1", "ServiceFunctionInstance", "test_sfc2_1", "ServiceFunctionChainInstance", "utilizedBy"),
+                ("minio", "ServiceFunction", "test_sfc2", "ServiceFunctionChain", "utilizedBy"),
+                ("apache", "ServiceFunction", "test_sfc2", "ServiceFunctionChain", "utilizedBy"),
+                ("test_sfc2_1", "ServiceFunctionChainInstance", "test_sfc2", "ServiceFunctionChain", "instanceOf")
+            ), graph_db, request_id
+        )
+
+        # check endpoint nodes have the correct properties
+        for endpoint, response_time, request_size, response_size in (("minio_2_ep1", 7, 2998, 3610), ("apache_1_ep1", 17.6, 1480, 7860)):
+            endpoint_node = graph_db.nodes.match("Endpoint", name=endpoint, uuid=request_id).first()
+            assert endpoint_node["response_time"] == response_time, "Wrong response time property of endpoint node"
+            # approximation is used to avoid comparing long floats retrieved from influx; the test data ensures the expected values differ enough that a +-1 absolute tolerance is sufficient
+            assert endpoint_node["request_size"] == pytest.approx(request_size, abs=1), "Wrong request size attribute of endpoint node"
+            assert endpoint_node["response_size"] == pytest.approx(response_size, abs=1), "Wrong response size attribute of endpoint node"
+
+    def test_delete(self, db_testing_data):
+        """
+        Tests the delete API endpoint of the Graph API - this test depends on the build test having passed so that graph_1_id and graph_2_id are set
+
+        :param db_testing_data: the from and to timestamps bounding the generated influx test data, the test database name and the graph db client object (a fixture from conftest)
+        """
+
+        global graph_1_id, graph_2_id
+
+        from_timestamp, to_timestamp, test_db_name, graph_db = db_testing_data
+
+        request = testing.DummyRequest()
+        request.matchdict["graph_id"] = "invalid_graph_id"
+        error_raised = False
+        try:
+            GraphAPI(request).delete_temporal_graph()
+        except HTTPNotFound:
+            error_raised = True
+        assert error_raised, "HTTP Not Found error must be raised in case of unrecognized subgraph ID"
+
+        # delete the graph associated with graph_1_id
+        request = testing.DummyRequest()
+        request.matchdict["graph_id"] = graph_1_id
+        response = GraphAPI(request).delete_temporal_graph()
+        assert response == {"uuid": graph_1_id, "deleted": 4}, "Incorrect response when deleting temporal graph"
+
+        # delete the graph associated with graph_2_id
+        request = testing.DummyRequest()
+        request.matchdict["graph_id"] = graph_2_id
+        response = GraphAPI(request).delete_temporal_graph()
+        assert response == {"uuid": graph_2_id, "deleted": 3}, "Incorrect response when deleting temporal graph"
+
+        assert len(graph_db.nodes.match("Endpoint")) == 0, "All endpoint nodes should have been deleted"
+        assert set([node["name"] for node in graph_db.nodes.match("ComputeNode")]) == set(["DC" + str(i) for i in range(1, 7)]), "Compute nodes must not be deleted"
+        assert set([node["name"] for node in graph_db.nodes.match("ServiceFunctionInstance")]) == {"nginx_1", "apache_1", "minio_1", "minio_2"}, "Service function instances must not be deleted."
+        assert set([node["name"] for node in graph_db.nodes.match("ServiceFunction")]) == {"nginx", "minio", "apache"}, "Service functions must not be deleted"
+
+    @pytest.mark.parametrize("graph_id, endpoint, compute_node, error_type, error_msg", [
+        ('e8cd4768-47dd-48cd-9c74-7f8926ddbad8', None, None, HTTPBadRequest, "HTTP Bad Request must be thrown in case of missing or invalid url parameters"),
+        ('e8cd4768-47dd-48cd-9c74-7f8926ddbad8', None, "nginx", HTTPBadRequest, "HTTP Bad Request must be thrown in case of missing or invalid url parameters"),
+        ('e8cd4768-47dd-48cd-9c74-7f8926ddbad8', "nginx_1_ep1", None, HTTPBadRequest, "HTTP Bad Request must be thrown in case of missing or invalid url parameters"),
+        ('random-uuid', "nginx_1_ep1", "nginx", HTTPNotFound, "HTTP Not Found error must be thrown for an endpoint node with incorrect request ID"),
+        ('random-uuid', "minio_1_ep1", "minio", HTTPNotFound, "HTTP Not Found error must be thrown for an endpoint node with incorrect request ID"),
+    ])
+    def test_rtt_error_handling(self, graph_id, endpoint, compute_node, error_type, error_msg):
+        """
+        Tests the error handling of the graph round trip time API endpoint - achieved by sending erroneous input in the request and verifying the appropriate error type has been returned.
+
+        :param graph_id: the UUID of the subgraph
+        :param endpoint: endpoint ID
+        :param compute_node: compute node ID
+        :param error_type: error type to expect as a response
+        :param error_msg: error message in case of a test failure
+        """
+
+        request = testing.DummyRequest()
+        request.matchdict["graph_id"] = graph_id
+        if endpoint is not None:
+            request.params["endpoint"] = endpoint
+        if compute_node is not None:
+            request.params["compute_node"] = compute_node
+        error_raised = False
+        try:
+            GraphAPI(request).run_rtt_query()
+        except error_type:
+            error_raised = True
+        assert error_raised, error_msg
+
+    def test_rtt(self, db_testing_data):
+        """
+        Tests the rtt API endpoint of the Graph API.
+
+        :param db_testing_data: the from and to timestamps bounding the generated influx test data, the test database name and the graph db client object (a fixture from conftest)
+        """
+
+        from_timestamp, to_timestamp, test_db_name, graph_db = db_testing_data
+
+        # create a graph to use for RTT test by using the build API endpoint
+        service_functions = dict(nginx={"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)",
+                                        "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"},
+                                 minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
+                                        "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"})
+        build_json_body = dict(database=test_db_name, retention_policy="autogen", service_function_chain_instance="test_sfc1_1", service_functions=service_functions)
+        body = dumps(build_json_body)
+        request = testing.DummyRequest()
+        request.params["from"] = from_timestamp
+        request.params["to"] = to_timestamp
+        request.body = body.encode(request.charset)
+        response = GraphAPI(request).build_temporal_graph()
+        graph_subresponse = response.pop("graph")
+        assert response == build_json_body, "Response must contain the request body"
+        assert graph_subresponse.get("uuid") is not None, "Request UUID must be attached to the response."
+        request_id = graph_subresponse["uuid"]
+
+        # test some more error case handling of the RTT API endpoint
+        request = testing.DummyRequest()
+        request.matchdict["graph_id"] = request_id
+        request.params["endpoint"] = "nginx_1_ep1"
+        request.params["compute"] = "DC1"
+        error_raised = False
+        try:
+            GraphAPI(request).run_rtt_query()
+        except HTTPBadRequest:
+            error_raised = True
+        assert error_raised, "HTTP Bad Request must be thrown in case of missing or invalid url parameters"
+
+        request = testing.DummyRequest()
+        request.matchdict["graph_id"] = request_id
+        request.params["endpoint"] = "nginx_1_ep1"
+        request.params["compute_node"] = "DC0"
+        error_raised = False
+        try:
+            GraphAPI(request).run_rtt_query()
+        except HTTPNotFound:
+            error_raised = True
+        assert error_raised, "HTTP Not Found error must be thrown for non existing compute node"
+
+        request = testing.DummyRequest()
+        request.matchdict["graph_id"] = request_id
+        request.params["endpoint"] = "apache_1_ep1"
+        request.params["compute_node"] = "DC1"
+        error_raised = False
+        try:
+            GraphAPI(request).run_rtt_query()
+        except HTTPNotFound:
+            error_raised = True
+        assert error_raised, "HTTP Not Found error must be thrown for a non existing endpoint"
+
+        # go through the set of input/output (expected) parameters and assert actual results match with expected ones
+        for dc, endpoint, forward_latencies, reverse_latencies, response_time, request_size, response_size, rtt, global_tags in (
+            ("DC6", "nginx_1_ep2", [], [], 22.2, 35600, 6420, 22.2, {"location": "DC6", "sr": "sr6", "ipendpoint": "nginx_1_ep2", "host": "host2", "sfc": "test_sfc1", "sfc_i": "test_sfc1_1", "sf": "nginx", "sf_i": "nginx_1"}),
+            ("DC2", "nginx_1_ep2", [11, 15, 4.5], [5.5, 13, 7.5], 22.2, 35600, 6420, 78, {"location": "DC6", "sr": "sr6", "ipendpoint": "nginx_1_ep2", "host": "host2", "sfc": "test_sfc1", "sfc_i": "test_sfc1_1", "sf": "nginx", "sf_i": "nginx_1"}),
+            ("DC3", "nginx_1_ep1", [12.5], [7.5], 18.2, 2260, 9660, 38, {"location": "DC4", "sr": "sr4", "ipendpoint": "nginx_1_ep1", "host": "host1", "sfc": "test_sfc1", "sfc_i": "test_sfc1_1", "sf": "nginx", "sf_i": "nginx_1"})
+        ):
+            request = testing.DummyRequest()
+            request.matchdict["graph_id"] = request_id
+            request.params["endpoint"] = endpoint
+            request.params["compute_node"] = dc
+            response = GraphAPI(request).run_rtt_query()
+            # approximation is used to avoid comparing long floats retrieved from influx; the test data ensures the expected values differ enough that a +-1 absolute tolerance is sufficient
+            assert response.pop("round_trip_time") == pytest.approx(rtt, abs=1), "Incorrect RTT response"
+            assert response == {"forward_latencies": forward_latencies, "reverse_latencies": reverse_latencies, "total_forward_latency": sum(forward_latencies), "total_reverse_latency": sum(reverse_latencies),
+                                "bandwidth": 104857600, "response_time": response_time, "global_tags": global_tags,
+                                "request_size": request_size, "response_size": response_size}, "Incorrect RTT response"
+
+        # send a new request for a new service function chain to create a second subgraph to test
+        service_functions = dict(minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
+                                        "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"},
+                                 apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)",
+                                         "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"})
+        build_json_body = dict(database=test_db_name, retention_policy="autogen", service_function_chain_instance="test_sfc2_1", service_functions=service_functions)
+        body = dumps(build_json_body)
+        request = testing.DummyRequest()
+        request.params["from"] = from_timestamp
+        request.params["to"] = to_timestamp
+        request.body = body.encode(request.charset)
+        response = GraphAPI(request).build_temporal_graph()
+        graph_subresponse = response.pop("graph")
+        assert response == build_json_body, "Response must contain the request body"
+        assert graph_subresponse.get("uuid") is not None, "Request UUID must be attached to the response."
+        request_id = graph_subresponse["uuid"]
+
+        # iterate over the (input, expected output) parameter sets and assert that the actual results match the expected ones
+        for dc, endpoint, forward_latencies, reverse_latencies, response_time, request_size, response_size, rtt, global_tags in (
+            ("DC5", "apache_1_ep1", [], [], 17.6, 1480, 7860, 17.6, {"location": "DC5", "sr": "sr5", "ipendpoint": "apache_1_ep1", "host": "host5", "sfc": "test_sfc2", "sfc_i": "test_sfc2_1", "sf": "apache", "sf_i": "apache_1"}),
+            ("DC5", "minio_2_ep1", [], [], 7, 2998, 3610, 7, {"location": "DC5", "sr": "sr5", "ipendpoint": "minio_2_ep1", "host": "host4", "sfc": "test_sfc2", "sfc_i": "test_sfc2_1", "sf": "minio", "sf_i": "minio_2"}),
+            ("DC3", "apache_1_ep1", [10, 15], [13, 9], 17.6, 1480, 7860, 64, {"location": "DC5", "sr": "sr5", "ipendpoint": "apache_1_ep1", "host": "host5", "sfc": "test_sfc2", "sfc_i": "test_sfc2_1", "sf": "apache", "sf_i": "apache_1"}),
+            ("DC2", "minio_2_ep1", [11, 15], [13, 7.5], 7, 2998, 3610, 53, {"location": "DC5", "sr": "sr5", "ipendpoint": "minio_2_ep1", "host": "host4", "sfc": "test_sfc2", "sfc_i": "test_sfc2_1", "sf": "minio", "sf_i": "minio_2"})
+        ):
+            request = testing.DummyRequest()
+            request.matchdict["graph_id"] = request_id
+            request.params["endpoint"] = endpoint
+            request.params["compute_node"] = dc
+            response = GraphAPI(request).run_rtt_query()
+            # an absolute tolerance of +-1 is used to avoid comparing the full-precision float values retrieved from influx; the test data is distinct enough for this to be safe
+            assert response.pop("request_size") == pytest.approx(request_size, abs=1), "Incorrect RTT response"
+            assert response.pop("response_size") == pytest.approx(response_size, abs=1), "Incorrect RTT response"
+            assert response.pop("round_trip_time") == pytest.approx(rtt, abs=1), "Incorrect RTT response"
+            assert response == {"forward_latencies": forward_latencies, "reverse_latencies": reverse_latencies, "total_forward_latency": sum(forward_latencies), "total_reverse_latency": sum(reverse_latencies),
+                                "bandwidth": 104857600, "response_time": response_time, "global_tags": global_tags}, "Incorrect RTT response"
+
+    @staticmethod
+    def check_exist_relationship(relationships_tuple, graph, uuid):
+        """
+        Iterates through a tuple of relationships and checks that each of them exists in the graph - a utility method reused across the tests.
+
+        :param relationships_tuple: the tuple to iterate
+        :param graph: the graph object
+        :param uuid: the uuid of the request
+        """
+
+        for relationship in relationships_tuple:
+            from_node_name, from_node_type, to_node_name, to_node_type, relationship_type = relationship
+            if from_node_type == "Endpoint":
+                from_node = graph.nodes.match(from_node_type, name=from_node_name, uuid=uuid).first()
+            else:
+                from_node = graph.nodes.match(from_node_type, name=from_node_name).first()
+            assert from_node is not None  # IMPORTANT, assert the from_node exists, otherwise the py2neo RelationshipMatcher object assumes you are looking for any node (instead of raising an error)
+
+            if to_node_type == "Endpoint":
+                to_node = graph.nodes.match(to_node_type, name=to_node_name, uuid=uuid).first()
+            else:
+                to_node = graph.nodes.match(to_node_type, name=to_node_name).first()
+            assert to_node is not None  # IMPORTANT, assert the to_node exists, otherwise the py2neo RelationshipMatcher object assumes you are looking for any node (instead of raising an error)
+
+            assert graph.relationships.match(nodes=(from_node, to_node), r_type=relationship_type).first() is not None, "Graph is missing a required relationship"
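A note on the helper above: `check_exist_relationship` drives all of the graph-shape assertions in these tests. A minimal sketch of the tuple format it consumes, using node names taken from the test fixture above (the py2neo `graph` object and `request_id` UUID are assumed to be the ones set up by the surrounding test; the helper is a static method on the test class, called unqualified here for brevity):

```python
# each entry: (from_node_name, from_node_type, to_node_name, to_node_type, relationship_type)
relationships = (
    ("nginx_1", "ServiceFunctionInstance", "nginx", "ServiceFunction", "instanceOf"),
    ("nginx_1_ep1", "Endpoint", "DC4", "ComputeNode", "hostedBy"),
)
# Endpoint nodes are additionally matched on the request UUID, since they are specific to
# one temporal graph; all other node types are shared between graphs
check_exist_relationship(relationships, graph, request_id)
```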
diff --git a/src/service/clmcservice/graphapi/utilities.py b/src/service/clmcservice/graphapi/utilities.py
new file mode 100644
index 0000000000000000000000000000000000000000..58d1dff2c40c8ec550194d96e887f1ed05d71439
--- /dev/null
+++ b/src/service/clmcservice/graphapi/utilities.py
@@ -0,0 +1,292 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          04-07-2018
+//      Created for Project :   FLAME
+"""
+
+from json import loads
+from py2neo import Node, Relationship
+import logging
+
+
+GRAPH_ROUND_TRIP_TIME_URL_PARAMS = ("compute_node", "endpoint")
+
+GRAPH_BUILD_URL_PARAMS = ("from", "to")
+GRAPH_BUILD_QUERY_PARAMS = {"database", "retention_policy", "service_function_chain_instance", "service_functions"}
+GRAPH_BUILD_SF_QUERY_PARAMS = {"response_time_field", "request_size_field", "response_size_field", "measurement_name"}
+
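+# aggregates a service function's measurement over the queried time window, grouped by the
+# tag set that identifies an individual endpoint (ipendpoint, location, sf_i, host, sr)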
+INFLUX_QUERY_TEMPLATE = 'SELECT {0} AS mean_response_time, {1} AS mean_request_size, {2} AS mean_response_size FROM "{3}"."{4}".{5} WHERE sfc_i=\'{6}\' and time>={7} and time<{8} GROUP BY ipendpoint, location, sf_i, host, sr'
+
+
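+# finds the shortest path between the compute node and the endpoint over 'linkedTo'/'hostedBy'
+# relationships only, then walks consecutive pairs of network nodes along that path to collect
+# the per-hop forward and reverse link latencies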
+RTT_CYPHER_QUERY_TEMPLATE = """
+MATCH (dc:ComputeNode {{ name: '{0}' }}),(endpoint:Endpoint {{ name: '{1}', uuid: '{2}'}}), 
+path = shortestPath((dc)-[*]-(endpoint))
+WHERE ALL(r IN relationships(path) WHERE type(r)='linkedTo' or type(r)='hostedBy' )
+WITH nodes(path) as all_nodes, endpoint as endpoint
+    WITH all_nodes[0..size(all_nodes)-1] as network_nodes, endpoint as endpoint
+    UNWIND RANGE(0, size(network_nodes) - 2) as id
+    WITH network_nodes[id] as source, network_nodes[id+1] as target, endpoint.response_time as response_time, endpoint.request_size as request_size, endpoint.response_size as response_size
+        MATCH (source) -[r1]-> (target), (target) -[r2]-> (source)
+        RETURN collect(r1.latency) as forward_latencies, reverse(collect(r2.latency)) as reverse_latencies, response_time, request_size, response_size
+"""
+
+
+log = logging.getLogger('service_logger')
+
+
+def validate_json_queries_body(body):
+    """
+    Validates the request body containing mappings from service functions to queries to execute.
+
+    :param body: the request body to validate
+    :return the validated json queries dictionary object
+    :raise AssertionError: if the body is invalid
+    """
+
+    global GRAPH_BUILD_QUERY_PARAMS
+
+    try:
+        body = loads(body)
+    except (ValueError, TypeError):  # the body is not valid JSON (or not a string at all)
+        raise AssertionError("Configuration must be a JSON object.")
+
+    assert GRAPH_BUILD_QUERY_PARAMS == set(body.keys()), "Invalid JSON query document."
+
+    sfc_i = body["service_function_chain_instance"]
+    sfc_i_subparts = sfc_i.split('_')
+    assert len(sfc_i_subparts) > 1, "Incorrect format of service function chain instance ID - use format <sfcID>_<instanceNum>"
+
+    # check the last part of the sfc_i ID is a number
+    try:
+        int(sfc_i_subparts[-1])
+    except ValueError:
+        assert False, "Incorrect format of service function chain instance ID - use format <sfcID>_<instanceNum>"
+
+    assert isinstance(body["service_functions"], dict), "The service function description should be represented with a dictionary."
+
+    for sf in body["service_functions"]:
+        query_data = body["service_functions"][sf]
+        assert isinstance(query_data, dict), "Each service function must be associated with a respective JSON object."
+        assert GRAPH_BUILD_SF_QUERY_PARAMS == set(query_data.keys()), "Invalid query data for service function {0} in the JSON query document".format(sf)
+
+    return body
+
+
+def validate_graph_url_params(params):
+    """
+    Validates the request url parameters used in building a temporal graph.
+
+    :param params: the parameters dictionary to validate
+    :return: the validated parameters
+    :raise AssertionError: for invalid parameters
+    """
+
+    global GRAPH_BUILD_URL_PARAMS
+
+    url_params = {}
+    for param in GRAPH_BUILD_URL_PARAMS:
+        assert param in params, "Incorrect url parameters - required url query parameter '{0}' is not found in the request parameters.".format(param)
+        url_params[param] = params[param]
+
+    try:
+        # convert timestamps to integers
+        url_params['from'] = int(url_params['from'])
+        url_params['to'] = int(url_params['to'])
+    except ValueError:
+        assert False, "Invalid URL timestamp parameters"
+
+    return url_params
+
+
+def validate_graph_rtt_params(params):
+    """
+    Validates the request url parameters used in running a round trip time cypher query.
+
+    :param params: the parameters dictionary to validate
+    :return: the validated parameters
+    :raise AssertionError: for invalid parameters
+    """
+
+    global GRAPH_ROUND_TRIP_TIME_URL_PARAMS
+
+    url_params = {}
+    for param in GRAPH_ROUND_TRIP_TIME_URL_PARAMS:
+        assert param in params, "Incorrect url parameters - required url query parameter '{0}' is not found in the request parameters.".format(param)
+        url_params[param] = params[param]
+
+    return url_params
+
+
+def find_or_create_node(graph, node_type, **properties):
+    """
+    This function checks if a node of the given type with the given properties exists, and if not - creates it.
+
+    :param graph: the graph object
+    :param node_type: the type of the node to find or create
+    :param properties: the properties of the node to find or create
+    :return: the found or newly created node object
+    """
+
+    if 'uuid' in properties:
+        node = graph.nodes.match(node_type, name=properties['name'], uuid=properties['uuid']).first()
+    else:
+        node = graph.nodes.match(node_type, name=properties['name']).first()
+
+    if node is None:
+        log.info("Creating node of type {0} with properties {1}".format(node_type, properties))
+        node = Node(node_type, **properties)
+        graph.create(node)
+
+    return node
+
+
+def find_or_create_edge(graph, edge_type, from_node, to_node, **properties):
+    """
+    This function checks if an edge of the given type with the given properties exists, and if not - creates it.
+
+    :param graph: the graph object
+    :param edge_type: the type of the edge to find or create
+    :param from_node: the source of the edge
+    :param to_node: the target of the edge
+    :param properties: the properties of the edge to find or create
+    :return: the found or newly created edge object
+    """
+
+    edge = graph.relationships.match(nodes=(from_node, to_node), r_type=edge_type).first()
+    if edge is None:
+        log.info("Creating edge of type {0} from node {1} to node {2} with properties {3}".format(edge_type, from_node, to_node, properties))
+        edge = Relationship(from_node, edge_type, to_node, **properties)
+        graph.create(edge)
+
+    return edge
+
+
+def build_temporal_graph(request_id, from_timestamp, to_timestamp, json_queries, graph, influx_client):
+    """
+    A function used to generate a temporal graph in the neo4j db.
+
+    :param request_id: the ID of the request
+    :param from_timestamp: the start of the time range
+    :param to_timestamp: the end of the time range
+    :param json_queries: the JSON object containing the query data for each service function
+    :param graph: the graph DB object
+    :param influx_client: the influx DB client object
+    """
+
+    global INFLUX_QUERY_TEMPLATE
+
+    db = json_queries["database"]
+    rp = json_queries["retention_policy"]
+    sfc_i = json_queries["service_function_chain_instance"]
+
+    log.info("Building graph for service function chain {0} from database {1} with retention policy {2}".format(sfc_i, db, rp))
+
+    sfc = "_".join(sfc_i.split('_')[: -1])  # assumes sfc_i is always in the form <sfc>_<num>
+
+    # create a UUID reference node
+    reference_node = Node("Reference", **{"uuid": request_id, "sfc": sfc, "sfc_i": sfc_i, "from": from_timestamp, "to": to_timestamp})
+    graph.create(reference_node)
+
+    # create a node for the service function chain if it doesn't exist
+    service_function_chain_node = find_or_create_node(graph, "ServiceFunctionChain", name=sfc)
+    # create a node for the service function chain instance if it doesn't exist
+    service_function_chain_instance_node = find_or_create_node(graph, "ServiceFunctionChainInstance", name=sfc_i)
+    # create an instanceOf edge if it doesn't exist
+    find_or_create_edge(graph, "instanceOf", service_function_chain_instance_node, service_function_chain_node)
+
+    compute_nodes = set()  # a set is used to keep track of all compute nodes that are found while building the graph, which is then used to retrieve the network latencies
+
+    # traverse the list of service functions
+    for service_function in json_queries["service_functions"]:
+        query_data = json_queries["service_functions"][service_function]
+
+        response_time_field = query_data["response_time_field"]
+        request_size_field = query_data["request_size_field"]
+        response_size_field = query_data["response_size_field"]
+        measurement = query_data["measurement_name"]
+
+        # build up the query by setting the placeholders in the query template
+        query_to_execute = INFLUX_QUERY_TEMPLATE.format(response_time_field, request_size_field, response_size_field, db, rp, measurement, sfc_i, from_timestamp, to_timestamp)
+
+        # create a node for the service function if it doesn't exist
+        service_function_node = find_or_create_node(graph, "ServiceFunction", name=service_function)
+        # create a utilizedBy edge between the service function and the service function chain
+        find_or_create_edge(graph, "utilizedBy", service_function_node, service_function_chain_node)
+
+        log.info("Executing query: {0}".format(query_to_execute))
+        result = influx_client.query(query_to_execute)  # execute the query
+
+        # iterate through each result item
+        for item in result.items():
+            metadata, result_points = item  # each result item is a tuple of two elements
+
+            # metadata consists of the result tags and the measurement name
+            # measurement = metadata[0]
+            tags = metadata[1]
+
+            result_point = next(result_points)  # get the result point dictionary
+            response_time = result_point["mean_response_time"]  # extract the response time of the SF from the result
+            request_size = result_point["mean_request_size"]  # extract the avg request size of the SF from the result
+            response_size = result_point["mean_response_size"]  # extract the avg response size of the SF from the result
+
+            # create a ServiceFunctionInstance node from the tag value (if it is not already created)
+            service_function_instance_node = find_or_create_node(graph, "ServiceFunctionInstance", name=tags["sf_i"])
+            # create an edge between the instance and the service function (if it is not already created)
+            find_or_create_edge(graph, "instanceOf", service_function_instance_node, service_function_node)
+            # create a utilizedBy edge between the service function instance and the service function chain instance
+            find_or_create_edge(graph, "utilizedBy", service_function_instance_node, service_function_chain_instance_node)
+
+            # create an Endpoint node from the tag value (if it is not already created)
+            ipendpoint_node = find_or_create_node(graph, "Endpoint", name=tags["ipendpoint"], response_time=response_time, request_size=request_size, response_size=response_size, uuid=request_id, host=tags["host"], sr=tags["sr"])
+            # create an edge between the instance and the endpoint (if it is not already created)
+            find_or_create_edge(graph, "realisedBy", service_function_instance_node, ipendpoint_node)
+
+            # create a ComputeNode node from the tag value (if it is not already created)
+            compute_node = find_or_create_node(graph, "ComputeNode", name=tags["location"])
+            # create an edge between the endpoint and the compute node (if it is not already created)
+            find_or_create_edge(graph, "hostedBy", ipendpoint_node, compute_node)
+
+            compute_nodes.add(compute_node)  # add the compute node to the set of compute nodes
+
+    log.info("Finished building graph for service function chain {0} from database {1} with retention policy {2}".format(sfc_i, db, rp))
+
+
+def delete_temporal_subgraph(graph, subgraph_id):
+    """
+    A function used to delete a subgraph associated with a subgraph ID obtained from the CLMC service.
+
+    :param graph: the neo4j graph db object
+    :param subgraph_id: the ID of the subgraph to delete
+
+    :return: the number of nodes that were matched with the given subgraph ID
+    """
+
+    log.info("Deleting subgraph associated with ID {0}".format(subgraph_id))
+
+    subgraph = graph.nodes.match(uuid=subgraph_id)
+    nodes_matched = 0
+    for node in subgraph:
+        graph.delete(node)
+        nodes_matched += 1
+
+    log.info("Deleted {0} nodes associated with ID {1}".format(nodes_matched, subgraph_id))
+
+    return nodes_matched
diff --git a/src/service/clmcservice/graphapi/views.py b/src/service/clmcservice/graphapi/views.py
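For orientation, a minimal sketch of how these utilities fit together outside the Pyramid views. The hosts, credentials, database name and time range below are hypothetical placeholders, not values mandated by the service:

```python
from json import dumps
from uuid import uuid4

from influxdb import InfluxDBClient
from py2neo import Graph

from clmcservice.graphapi.utilities import validate_json_queries_body, build_temporal_graph

body = dumps({
    "database": "testdb", "retention_policy": "autogen",
    "service_function_chain_instance": "test_sfc1_1",
    "service_functions": {
        "nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)",
                  "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"}
    }
})
json_queries = validate_json_queries_body(body)  # raises AssertionError on malformed input

graph = Graph(host="localhost", password="admin")                 # neo4j connection
influx = InfluxDBClient(host="localhost", port=8086, timeout=10)  # influx connection

request_id = str(uuid4())
# timestamps are in nanoseconds, as expected by the influx query template
build_temporal_graph(request_id, 1528385420 * 10**9, 1528385480 * 10**9, json_queries, graph, influx)
```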
new file mode 100644
index 0000000000000000000000000000000000000000..79ff877a8fa47addc0d06f0369a1165b138ccb9b
--- /dev/null
+++ b/src/service/clmcservice/graphapi/views.py
@@ -0,0 +1,212 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          04-07-2018
+//      Created for Project :   FLAME
+"""
+
+
+from clmcservice.graphapi.utilities import validate_json_queries_body, validate_graph_url_params, build_temporal_graph, delete_temporal_subgraph, validate_graph_rtt_params, RTT_CYPHER_QUERY_TEMPLATE
+from uuid import uuid4
+from influxdb import InfluxDBClient
+from py2neo import Graph
+from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound
+from pyramid.view import view_defaults, view_config
+import logging
+
+
+log = logging.getLogger('service_logger')
+
+
+@view_defaults(renderer='json')
+class GraphAPI(object):
+    """
+    A class-based view for building temporal graphs and running graph queries.
+    """
+
+    def __init__(self, request):
+        """
+        Initialises the instance of the view with the request argument.
+
+        :param request: client's call request
+        """
+
+        self.request = request
+
+    @view_config(route_name='graph_build', request_method='POST')
+    def build_temporal_graph(self):
+        """
+        An API endpoint to build a temporal graph and store it in neo4j based on the posted JSON query document.
+        The request parameters must also include URL query parameters defining the time range for which the graph must be generated.
+
+        :raises HTTPBadRequest: if request body is not a valid JSON with the queries per service function or if request URL doesn't contain the required URL query parameters
+        :return: A JSON document containing the posted request body, along with metadata about the built graph (time range and uuid, which can then be reused for other API calls)
+        """
+
+        try:
+            body = self.request.body.decode(self.request.charset)
+            json_queries = validate_json_queries_body(body)  # validate the content and receive a json dictionary object
+        except AssertionError as e:
+            raise HTTPBadRequest("Bad request content: {0}".format(e.args))
+
+        try:
+            params = validate_graph_url_params(self.request.params)
+        except AssertionError as e:
+            raise HTTPBadRequest("Request URL format is incorrect: {0}".format(e.args))
+
+        graph = Graph(host=self.request.registry.settings['neo4j_host'], password=self.request.registry.settings['neo4j_password'])
+        influx_client = InfluxDBClient(host=self.request.registry.settings['influx_host'], port=self.request.registry.settings['influx_port'], timeout=10)
+
+        database_name = json_queries["database"]
+        if database_name not in [db["name"] for db in influx_client.get_list_database()]:
+            raise HTTPBadRequest("Database {0} not found.".format(database_name))
+
+        retention_policy = json_queries["retention_policy"]
+        if retention_policy not in [rp["name"] for rp in influx_client.get_list_retention_policies(database_name)]:
+            raise HTTPBadRequest("Retention policy {0} for database {1} not found.".format(retention_policy, database_name))
+
+        from_timestamp = params['from'] * 10**9  # convert from seconds to nanoseconds, as used by influx timestamps
+        to_timestamp = params['to'] * 10**9
+
+        request_id = str(uuid4())
+
+        build_temporal_graph(request_id, from_timestamp, to_timestamp, json_queries, graph, influx_client)
+        json_queries['graph'] = {"uuid": request_id, "time_range": {"from": from_timestamp, "to": to_timestamp}}
+        return json_queries
+
+    @view_config(route_name='graph_manage', request_method='DELETE')
+    def delete_temporal_graph(self):
+        """
+        An API endpoint to delete a temporal graph associated with a uuid generated by the CLMC service.
+
+        :return: A JSON document containing the UUID of the deleted subgraph
+        :raises HTTPNotFound: if the request is not associated with any subgraph
+        """
+
+        graph_id = self.request.matchdict['graph_id']  # get the UUID of the subgraph from the URL
+        graph = Graph(host=self.request.registry.settings['neo4j_host'], password=self.request.registry.settings['neo4j_password'])  # connect to the neo4j graph db
+
+        if graph.nodes.match("Reference", uuid=graph_id).first() is None:
+            raise HTTPNotFound("No subgraph found associated with the request ID {0}".format(graph_id))
+
+        number_of_deleted_nodes = delete_temporal_subgraph(graph, graph_id)
+        return {"uuid": graph_id, "deleted": number_of_deleted_nodes}
+
+    @view_config(route_name='graph_algorithms_rtt', request_method='GET')
+    def run_rtt_query(self):
+        """
+        An API endpoint to run the round trip time cypher query over the graph associated with a given request ID.
+
+        :return: A JSON response with a list of forward latencies, reverse latencies and SF endpoint response time.
+        :raises HTTPBadRequest: if the request URL doesn't contain the required URL query parameters
+        :raises HTTPNotFound: if the request is not associated with any subgraph or the compute node / endpoint node doesn't exist
+        """
+
+        graph_id = self.request.matchdict['graph_id']  # get the UUID of the subgraph from the URL
+
+        try:
+            params = validate_graph_rtt_params(self.request.params)
+        except AssertionError as e:
+            raise HTTPBadRequest("Request URL format is incorrect: {0}".format(e.args))
+
+        compute_node_label = params["compute_node"]
+        endpoint_node_label = params["endpoint"]
+
+        graph = Graph(host=self.request.registry.settings['neo4j_host'], password=self.request.registry.settings['neo4j_password'])  # connect to the neo4j graph db
+
+        all_nodes = graph.nodes
+
+        reference_node = all_nodes.match("Reference", uuid=graph_id).first()
+        if reference_node is None:
+            raise HTTPNotFound("No subgraph found associated with the request ID {0}".format(graph_id))
+
+        compute_node = all_nodes.match("ComputeNode", name=compute_node_label).first()
+        if compute_node is None:
+            raise HTTPNotFound("Compute node {0} doesn't exist.".format(compute_node_label))
+
+        endpoint_node = all_nodes.match("Endpoint", name=endpoint_node_label, uuid=graph_id).first()
+        if endpoint_node is None:
+            raise HTTPNotFound("Endpoint node {0} doesn't exist.".format(endpoint_node_label))
+
+        # check if the endpoint is hosted by the compute node before running the RTT cypher query
+        hosted_by_node = graph.relationships.match(nodes=(endpoint_node, None), r_type="hostedBy").first().end_node
+        if hosted_by_node["name"] == compute_node["name"]:
+            result = {"forward_latencies": [], "reverse_latencies": [], "response_time": endpoint_node["response_time"],
+                      "request_size": endpoint_node["request_size"], "response_size": endpoint_node["response_size"]}
+        else:
+            query_to_execute = RTT_CYPHER_QUERY_TEMPLATE.format(compute_node_label, endpoint_node_label, graph_id)
+            log.info("Executing cypher query: {0}".format(query_to_execute))
+            data = graph.run(query_to_execute).data()  # returns a list of dictionaries, each dictionary represents a row in the result
+            result = data[0]
+
+        # match the relationship object first and check it for None - calling .start_node on a missing match would raise an AttributeError before the check is ever reached
+        realised_by_edge = graph.match(nodes=(None, endpoint_node), r_type="realisedBy").first()
+        if realised_by_edge is None:
+            msg = "No service function instance found associated with endpoint {0}".format(endpoint_node["name"])
+            log.error("Unexpected error: {0}".format(msg))
+            raise HTTPBadRequest(msg)
+        sf_i_node = realised_by_edge.start_node
+
+        instance_of_edge = graph.match(nodes=(sf_i_node, None), r_type="instanceOf").first()
+        if instance_of_edge is None:
+            msg = "No service function found associated with service function instance {0}".format(sf_i_node["name"])
+            log.error("Unexpected error: {0}".format(msg))
+            raise HTTPBadRequest(msg)
+        sf_node = instance_of_edge.end_node
+
+        result["global_tags"] = {"ipendpoint": endpoint_node["name"], "host": endpoint_node["host"], "location": hosted_by_node["name"], "sr": endpoint_node["sr"],
+                                 "sfc": reference_node["sfc"], "sfc_i": reference_node["sfc_i"], "sf": sf_node["name"], "sf_i": sf_i_node["name"]}
+
+        # calculate the Round-Trip-Time
+        total_forward_latency = sum(result["forward_latencies"])
+        result["total_forward_latency"] = total_forward_latency
+        total_reverse_latency = sum(result["reverse_latencies"])
+        result["total_reverse_latency"] = total_reverse_latency
+        bandwidth = self.request.registry.settings["network_bandwidth"]
+        result["bandwidth"] = bandwidth
+        service_delay = result["response_time"]
+        request_size = result["request_size"]
+        response_size = result["response_size"]
+
+        round_trip_time = self.calculate_round_trip_time(total_forward_latency, total_reverse_latency, service_delay, request_size, response_size, bandwidth)
+        result["round_trip_time"] = round_trip_time
+
+        return result
+
+    @staticmethod
+    def calculate_round_trip_time(forward_latency, reverse_latency, service_delay, request_size, response_size, bandwidth, packet_size=1500, packet_header_size=50):
+        """
+        Calculates the round trip time given the list of arguments.
+
+        :param forward_latency: network latency in forward direction (s)
+        :param reverse_latency: network latency in reverse direction (s)
+        :param service_delay: media service delay (s)
+        :param request_size: request size (bytes)
+        :param response_size: response size (bytes)
+        :param bandwidth: network bandwidth (Mb/s)
+        :param packet_size: size of packet (bytes)
+        :param packet_header_size: size of the header of the packet (bytes)
+        :return: the calculated round trip time
+        """
+
+        # data-transfer delays only apply when there is an actual network path between the compute node and the endpoint (non-zero latencies); otherwise the endpoint is hosted on the queried compute node itself
+        if forward_latency > 0 and reverse_latency > 0:
+            forward_data_delay = (8/10**6) * (request_size / bandwidth) * (packet_size / (packet_size - packet_header_size))
+            reverse_data_delay = (8/10**6) * (response_size / bandwidth) * (packet_size / (packet_size - packet_header_size))
+        else:
+            forward_data_delay, reverse_data_delay = 0, 0
+
+        return forward_latency + forward_data_delay + service_delay + reverse_latency + reverse_data_delay
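To make the formula concrete, here is the arithmetic behind one of the expected values in the tests above (DC2 querying nginx_1_ep2). With the bandwidth setting used by the test fixture, the data-transfer delays are negligible, so the round trip time is dominated by the summed link latencies plus the service response time:

```python
forward, reverse = [11, 15, 4.5], [5.5, 13, 7.5]           # per-hop latencies from the fixture
service_delay, request_size, response_size = 22.2, 35600, 6420
bandwidth, packet_size, header_size = 104857600, 1500, 50

overhead = packet_size / (packet_size - header_size)        # per-packet header overhead factor
forward_data_delay = (8 / 10**6) * (request_size / bandwidth) * overhead   # ~2.8e-9
reverse_data_delay = (8 / 10**6) * (response_size / bandwidth) * overhead  # ~5.1e-10

rtt = sum(forward) + forward_data_delay + service_delay + sum(reverse) + reverse_data_delay
print(rtt)  # ~78.7, matching the expected value of 78 within the test tolerance
```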
diff --git a/src/service/clmcservice/initialize_db.py b/src/service/clmcservice/initialize_db.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6987d65ce72a38ae229b6ad9cdd55d0756efb47
--- /dev/null
+++ b/src/service/clmcservice/initialize_db.py
@@ -0,0 +1,61 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          25-06-2018
+//      Created for Project :   FLAME
+"""
+
+import os
+import sys
+from sqlalchemy import engine_from_config
+from pyramid.paster import get_appsettings, setup_logging
+from clmcservice.models.meta import Base
+
+
+def usage(argv):
+    """
+    Prints usage information and exits - called when the script is invoked incorrectly.
+
+    :param argv: cmd arguments
+    """
+
+    cmd = os.path.basename(argv[0])
+    print('usage: %s <config_uri>\n'
+          '(example: "%s development.ini")' % (cmd, cmd))
+    sys.exit(1)
+
+
+def main(argv=sys.argv):
+    """
+    Main method of the script - initialises the database by creating all tables declared in the clmcservice.models package
+
+    :param argv: command line arguments - expects a configuration .ini file from which it retrieves the URL with which to connect to postgresql
+    """
+
+    if len(argv) != 2:
+        usage(argv)  # in case of wrong usage
+
+    config_uri = argv[1]
+    setup_logging(config_uri)
+
+    settings = get_appsettings(config_uri)  # get application specific settings
+    engine = engine_from_config(settings, 'sqlalchemy.')  # create the db engine from the sqlalchemy setting configured in the .ini file
+
+    Base.metadata.create_all(engine)  # creates all model tables
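The script expects the path to the application's .ini file as its only argument. A minimal sketch of invoking it programmatically (assuming a development.ini that defines the sqlalchemy.url setting, as the usage message suggests):

```python
# equivalent to running:  python initialize_db.py development.ini
from clmcservice.initialize_db import main

main(["initialize_db.py", "development.ini"])
```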
diff --git a/src/service/clmcservice/models/__init__.py b/src/service/clmcservice/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ceb55de67a31ce1456af9e100204097b5120309
--- /dev/null
+++ b/src/service/clmcservice/models/__init__.py
@@ -0,0 +1,3 @@
+from .meta import DBSession
+from .whoami_models import ServiceFunctionEndpoint
+from .config_models import ServiceFunctionChain
diff --git a/src/service/clmcservice/models/config_models.py b/src/service/clmcservice/models/config_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..17ced600999db1440c6afc1daaf26f2aab6b6022
--- /dev/null
+++ b/src/service/clmcservice/models/config_models.py
@@ -0,0 +1,83 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          02-07-2018
+//      Created for Project :   FLAME
+"""
+
+from sqlalchemy import Column, String
+from sqlalchemy.dialects.postgresql import JSONB
+from clmcservice.models.meta import Base
+
+
+class ServiceFunctionChain(Base):
+    """
+    This class defines the service function chain model of the config API, declaring the relations between individual service functions per service function chain.
+    """
+
+    __tablename__ = 'sfchain'  # table name in the PostgreSQL database
+
+    sfc = Column(String, nullable=False, primary_key=True)  # service function chain label
+    chain = Column(JSONB, nullable=False)  # the service function chain graph represented by a python dictionary (JSON object essentially)
+
+    @property
+    def json(self):
+        """
+        Converts an instance of a ServiceFunctionChain to JSON format.
+
+        :return: a python dictionary object
+        """
+
+        fields = {c.name: getattr(self, c.name) for c in self.__table__.columns}
+
+        return fields
+
+    @staticmethod
+    def required_columns():
+        """
+        Returns the required columns for constructing a valid instance.
+
+        :return: a tuple of column names
+        """
+
+        return tuple(column.name for column in ServiceFunctionChain.__table__.columns)
+
+    @staticmethod
+    def get(sfc):
+        """
+        Gets the instance matching the sfc argument.
+
+        :param sfc: service function chain id
+
+        :return: the first object from the result set that matches the sfc argument (must be only one)
+        """
+
+        return ServiceFunctionChain.query().filter(ServiceFunctionChain.sfc == sfc).first()
+
+    @staticmethod
+    def exists(sfc):
+        """
+        Checks if an instance matching the sfc exists.
+
+        :param sfc: service function chain id
+
+        :return: True if exists, False otherwise
+        """
+
+        return ServiceFunctionChain.get(sfc) is not None
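A minimal sketch of the model's helpers with hypothetical data (it assumes DBSession from meta.py has already been bound to a PostgreSQL engine, as the service does at startup):

```python
from clmcservice.models import ServiceFunctionChain

chain = ServiceFunctionChain(sfc="test_sfc1", chain={"nginx": ["minio"]})
ServiceFunctionChain.add(chain)  # persistence helper inherited from the ORM base class in meta.py

assert ServiceFunctionChain.exists("test_sfc1")
print(ServiceFunctionChain.get("test_sfc1").json)  # {'sfc': 'test_sfc1', 'chain': {'nginx': ['minio']}}
```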
diff --git a/src/service/clmcservice/models/meta.py b/src/service/clmcservice/models/meta.py
new file mode 100644
index 0000000000000000000000000000000000000000..698d750c650ec21a442f217b4e0f998a09ca68ea
--- /dev/null
+++ b/src/service/clmcservice/models/meta.py
@@ -0,0 +1,107 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          02-07-2018
+//      Created for Project :   FLAME
+"""
+
+
+import transaction
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import scoped_session, sessionmaker
+from zope.sqlalchemy import ZopeTransactionExtension
+
+
+DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))  # initialise an ORM session; it should be reused across the different modules
+
+
+class ORMClass(object):
+    """
+    Declares a parent class for all models which eases querying
+    """
+
+    @classmethod
+    def query(cls):
+        """
+        Passes the class down to the DBSession.query method, so that callers can write ModelClass.query() instead of DBSession.query(ModelClass).
+
+        :return: a query object for instances of this model
+        """
+
+        global DBSession
+
+        return DBSession.query(cls)
+
+    @staticmethod
+    def add(instance):
+        """
+        Adds an instance of a model to the database.
+
+        :param instance: the instance to be created in the db.
+        """
+
+        global DBSession
+
+        with transaction.manager:
+            DBSession.add(instance)
+
+    @staticmethod
+    def delete(instance):
+        """
+        Deletes an instance of a model from the database.
+
+        :param instance: the instance to be deleted from the db.
+        """
+
+        global DBSession
+
+        with transaction.manager:
+            DBSession.delete(instance)
+
+    @staticmethod
+    def replace(old_instance, new_instance):
+        """
+        Replaces an instance of a model in the database with a new instance.
+
+        :param old_instance: the instance to be replaced in the db.
+        :param new_instance: the new instance to add
+        """
+
+        global DBSession
+
+        with transaction.manager:
+            DBSession.add(new_instance)
+            DBSession.delete(old_instance)
+
+    @classmethod
+    def delete_all(cls):
+        """
+        Deletes all instances of a model from the database.
+
+        :return: the number of deleted rows
+        """
+
+        global DBSession
+
+        with transaction.manager:
+            deleted_rows = DBSession.query(cls).delete()
+
+        return deleted_rows
+
+
+Base = declarative_base(cls=ORMClass)  # initialise a declarative Base instance to use for the web app models (inherits from the base ORM class defined above)
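The scoped session above is created unbound. A minimal sketch of wiring it to an engine (the connection URL is a hypothetical placeholder - the service derives the real one from the sqlalchemy.url setting in its .ini file):

```python
from sqlalchemy import create_engine
from clmcservice.models.meta import Base, DBSession

engine = create_engine("postgresql://clmc:clmc@localhost:5432/clmcdb")  # hypothetical URL
DBSession.configure(bind=engine)  # bind the scoped session to the engine
Base.metadata.create_all(engine)  # create the model tables if they don't exist yet
```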
diff --git a/src/service/clmcservice/models/whoami_models.py b/src/service/clmcservice/models/whoami_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..30466ccfeb4ad77ad7c13a020e1664f1f67f33fe
--- /dev/null
+++ b/src/service/clmcservice/models/whoami_models.py
@@ -0,0 +1,105 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          02-07-2018
+//      Created for Project :   FLAME
+"""
+
+from sqlalchemy import Column, String, Integer, UniqueConstraint, and_
+from clmcservice.models.meta import Base
+
+
+class ServiceFunctionEndpoint(Base):
+    """
+    This class defines the main model of the WHOAMI API, declaring the global tags for a specific service function on a specific endpoint.
+    """
+
+    __tablename__ = 'sfendpoint'  # table name in the PostgreSQL database
+
+    __table_args__ = (UniqueConstraint('sf_i', 'sf_endpoint', 'sr'),)  # defines a unique constraint across 3 columns - sf_i, sf_endpoint, sr
+
+    uid = Column(Integer, primary_key=True, autoincrement=True, nullable=False)  # a primary key integer field (auto incremented)
+
+    location = Column(String, nullable=False)  # cluster label
+    sfc = Column(String, nullable=False)  # service function chain label
+    sfc_i = Column(String, nullable=False)  # service function chain instance identifier
+    sf = Column(String, nullable=False)  # service function label
+    sf_i = Column(String, nullable=False)   # service function identifier (potentially FQDN)
+    sf_endpoint = Column(String, nullable=False)  # service function endpoint (potentially IP address)
+    sr = Column(String, nullable=False)  # service router ID - service router that connects the VM to FLAME
+
+    @property
+    def json(self):
+        """
+        Converts an instance of a ServiceFunctionEndpoint to JSON format.
+
+        :return: a python dictionary object
+        """
+
+        fields = {c.name: getattr(self, c.name) for c in self.__table__.columns}
+        fields.pop("uid")
+
+        return fields
+
+    @staticmethod
+    def required_columns():
+        """
+        Returns the required columns for constructing a valid instance.
+
+        :return: a tuple of column names
+        """
+
+        return tuple(column.name for column in ServiceFunctionEndpoint.__table__.columns if column.name != "uid")
+
+    @staticmethod
+    def constrained_columns():
+        """
+        :return: the columns that uniquely identify an instance of this model.
+        """
+
+        return tuple(column.name for column in ServiceFunctionEndpoint.__table_args__[0].columns)
+
+    @staticmethod
+    def get(sf_i, sf_endpoint, sr):
+        """
+        Gets the instance matching the unique constraint or None if not existing.
+
+        :param sf_i: service function instance
+        :param sf_endpoint: service function endpoint
+        :param sr: service router
+
+        :return: the first object from the result set that matches the unique constraint or None
+        """
+
+        return ServiceFunctionEndpoint.query().filter(and_(ServiceFunctionEndpoint.sf_i == sf_i, ServiceFunctionEndpoint.sf_endpoint == sf_endpoint, ServiceFunctionEndpoint.sr == sr)).first()
+
+    @staticmethod
+    def exists(sf_i, sf_endpoint, sr):
+        """
+        Checks if an instance matching the unique constraint exists.
+
+        :param sf_i: service function instance
+        :param sf_endpoint: service function endpoint
+        :param sr: service router
+
+        :return: True if exists, False otherwise
+        """
+
+        return ServiceFunctionEndpoint.get(sf_i, sf_endpoint, sr) is not None
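For reference, the two column helpers evaluate as follows (uid, the auto-incremented primary key, is excluded from required_columns, while constrained_columns mirrors the unique constraint):

```python
from clmcservice.models import ServiceFunctionEndpoint

print(ServiceFunctionEndpoint.required_columns())
# ('location', 'sfc', 'sfc_i', 'sf', 'sf_i', 'sf_endpoint', 'sr')
print(ServiceFunctionEndpoint.constrained_columns())
# ('sf_i', 'sf_endpoint', 'sr')
```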
diff --git a/src/service/clmcservice/test_sr_mock_view.py b/src/service/clmcservice/test_sr_mock_view.py
deleted file mode 100644
index ddf51a9cc6f7a5348aed31466e5002a703dd8665..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/test_sr_mock_view.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import pytest
-from pyramid import testing
-from pyramid.httpexceptions import HTTPBadRequest
-from time import sleep
-from clmcservice.utilities import CONFIG_ATTRIBUTES, PROCESS_ATTRIBUTE, RUNNING_FLAG, MALFORMED_FLAG, URL_REGEX
-import os
-import signal
-
-
-class TestSFEMCMockAPI(object):
-
-    @pytest.fixture(autouse=True)
-    def app_config(self):
-        print("app_config")
-        self.config = testing.setUp()
-
-        # endpoint
-        # sr
-        # sfc_i
-        # sf_i
- 
-        self.config.add_settings({
-            'my_endpoint_1': {'sfc_i': 'my_sfc_i_1', 'sf_i': 'my_sf_i_1', 'sr': 'my_sr_1'},
-            'my_endpoint_2': {'sfc_i': 'my_sfc_i_2', 'sf_i': 'my_sf_i_2', 'sr': 'my_sr_2'}})
-
-        yield
-
-        testing.tearDown()
-        
-
-    def test_GET_config(self):
-        print("Test get")
-
-        # nested import so that importing the class view is part of the test itself
-        from clmcservice.views import SFEMCMockConfig  
-
-        request = testing.DummyRequest()
-        response = SFEMCMockConfig(request).get()
-
-        print("response={0}".format(response))
-
-    @pytest.mark.parametrize("input_body, output_value", [
-        ('{"aggregator_report_period": 10, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://171.40.231.51:8086"}',
-         {'aggregator_report_period': 10, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "http://171.40.231.51:8086"}),
-    ])
-    def test_PUT_config(self, input_body, output_value):
-        print("Test put")
-
-    
\ No newline at end of file
diff --git a/src/service/clmcservice/views.py b/src/service/clmcservice/views.py
deleted file mode 100644
index 84fae5b8404410c278117ffe99d767f253385ac9..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/views.py
+++ /dev/null
@@ -1,346 +0,0 @@
-"""
-// © University of Southampton IT Innovation Centre, 2018
-//
-// Copyright in this software belongs to University of Southampton
-// IT Innovation Centre of Gamma House, Enterprise Road,
-// Chilworth Science Park, Southampton, SO16 7NS, UK.
-//
-// This software may not be used, sold, licensed, transferred, copied
-// or reproduced in whole or in part in any manner or form or in or
-// on any media by any person other than in accordance with the terms
-// of the Licence Agreement supplied with the software, or otherwise
-// without the prior written consent of the copyright owners.
-//
-// This software is distributed WITHOUT ANY WARRANTY, without even the
-// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-// PURPOSE, except where stated in the Licence Agreement supplied with
-// the software.
-//
-//      Created By :            Nikolay Stanchev
-//      Created Date :          15-05-2018
-//      Created for Project :   FLAME
-"""
-
-from pyramid.view import view_defaults
-from pyramid.httpexceptions import HTTPBadRequest, HTTPInternalServerError
-from influxdb import InfluxDBClient
-from urllib.parse import urlparse
-from subprocess import Popen
-from clmcservice.utilities import validate_config_content, validate_action_content, validate_round_trip_query_params, \
-    CONFIG_ATTRIBUTES, ROUND_TRIP_ATTRIBUTES, RUNNING_FLAG, PROCESS_ATTRIBUTE, MALFORMED_FLAG, COMMENT_ATTRIBUTE, COMMENT_VALUE
-import os
-import os.path
-import sys
-import logging
-
-
-log = logging.getLogger('service_logger')
-
-
-@view_defaults(route_name='aggregator_config', renderer='json')
-class AggregatorConfig(object):
-    """
-    A class-based view for accessing and mutating the configuration of the aggregator.
-    """
-
-    def __init__(self, request):
-        """
-        Initialises the instance of the view with the request argument.
-
-        :param request: client's call request
-        """
-
-        self.request = request
-
-    def get(self):
-        """
-        A GET API call for the configuration of the aggregator.
-
-        :return: A JSON response with the configuration of the aggregator.
-        """
-
-        aggregator_data = self.request.registry.settings
-        config = {key: aggregator_data.get(key) for key in CONFIG_ATTRIBUTES}
-
-        return config
-
-    def put(self):
-        """
-        A PUT API call for the status of the aggregator.
-
-        :return: A JSON response to the PUT call - essentially with the new configured data and comment of the state of the aggregator
-        :raises HTTPBadRequest: if request body is not a valid JSON for the configurator
-        """
-
-        old_config = {attribute: self.request.registry.settings.get(attribute) for attribute in CONFIG_ATTRIBUTES}
-        new_config = self.request.body.decode(self.request.charset)
-
-        try:
-            new_config = validate_config_content(new_config)
-
-            for attribute in CONFIG_ATTRIBUTES:
-                self.request.registry.settings[attribute] = new_config.get(attribute)
-
-            # if configuration is not already malformed, check whether the configuration is updated
-            if not self.request.registry.settings[MALFORMED_FLAG]:
-                malformed = old_config != new_config and AggregatorController.is_process_running(self.request.registry.settings.get(PROCESS_ATTRIBUTE))
-                self.request.registry.settings[MALFORMED_FLAG] = malformed
-                if malformed:
-                    new_config[MALFORMED_FLAG] = True
-                    new_config[COMMENT_ATTRIBUTE] = COMMENT_VALUE
-
-            return new_config
-
-        except AssertionError:
-            raise HTTPBadRequest("Bad request content - configuration format is incorrect.")
-
-
-@view_defaults(route_name='aggregator_controller', renderer='json')
-class AggregatorController(object):
-
-    """
-    A class-based view for controlling the aggregator.
-    """
-
-    def __init__(self, request):
-        """
-        Initialises the instance of the view with the request argument.
-
-        :param request: client's call request
-        """
-
-        self.request = request
-
-    def get(self):
-        """
-        A GET API call for the status of the aggregator - running or not.
-
-        :return: A JSON response with the status of the aggregator.
-        """
-
-        aggregator_data = self.request.registry.settings
-        aggregator_process = aggregator_data.get(PROCESS_ATTRIBUTE)
-        aggregator_running = self.is_process_running(aggregator_process)
-
-        config = {RUNNING_FLAG: aggregator_running}
-
-        if aggregator_data[MALFORMED_FLAG] and aggregator_running:
-            config[MALFORMED_FLAG] = True
-            config[COMMENT_ATTRIBUTE] = COMMENT_VALUE
-
-        return config
-
-    def put(self):
-        """
-        A PUT API call for the status of the aggregator.
-
-        :return: A JSON response to the PUT call - essentially saying whether the aggregator is running or not
-        :raises HTTPBadRequest: if request body is not a valid JSON for the controller
-        """
-
-        content = self.request.body.decode(self.request.charset)
-
-        try:
-            content = validate_action_content(content)
-
-            config = {attribute: self.request.registry.settings.get(attribute) for attribute in CONFIG_ATTRIBUTES}
-
-            action = content['action']
-
-            aggregator_running = self.is_process_running(self.request.registry.settings.get(PROCESS_ATTRIBUTE))
-            if action == 'start':
-                if not aggregator_running:
-                    process = self.start_aggregator(config)
-                    aggregator_running = True
-                    self.request.registry.settings[PROCESS_ATTRIBUTE] = process
-            elif action == 'stop':
-                self.stop_aggregator(self.request.registry.settings.get(PROCESS_ATTRIBUTE))
-                aggregator_running = False
-                self.request.registry.settings[PROCESS_ATTRIBUTE] = None
-                self.request.registry.settings[MALFORMED_FLAG] = False
-            elif action == 'restart':
-                self.stop_aggregator(self.request.registry.settings.get(PROCESS_ATTRIBUTE))
-                process = self.start_aggregator(config)
-                aggregator_running = True
-                self.request.registry.settings[PROCESS_ATTRIBUTE] = process
-                self.request.registry.settings[MALFORMED_FLAG] = False
-
-            return {RUNNING_FLAG: aggregator_running}
-
-        except AssertionError:
-            raise HTTPBadRequest('Bad request content - must be in JSON format: {"action": value}, where value is "start", "stop" or "restart".')
-
-    @staticmethod
-    def start_aggregator(config):
-        """
-        An auxiliary method to start the aggregator.
-
-        :param config: the configuration containing the arguments for the aggregator
-        :return: the process object of the started aggregator script
-        """
-
-        dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'aggregation')
-        python_interpreter = sys.executable
-        command = [python_interpreter, 'aggregator.py', '--period', str(config.get('aggregator_report_period')), '--database',
-                   config.get('aggregator_database_name'), '--url', config.get('aggregator_database_url')]
-        process = Popen(command, cwd=dir_path)
-
-        log.info("\nStarted aggregator process with PID: {0}\n".format(process.pid))
-
-        return process
-
-    @staticmethod
-    def stop_aggregator(process):
-        """
-        An auxiliary method to stop the aggregator.
-
-        :param process: the process to terminate
-        """
-
-        # check if the process is started
-        if AggregatorController.is_process_running(process):
-            process.terminate()
-            log.info("\nStopped aggregator process with PID: {0}\n".format(process.pid))
-
-    @staticmethod
-    def is_process_running(process):
-        """
-        Checks if a process is running.
-
-        :param process: the Popen object to check
-        :return: True if running, False otherwise
-        """
-
-        # check if the process is started before trying to terminate it - process.poll() only returns something if the process has terminated, hence we check for a None value
-        return process is not None and process.poll() is None
-
-
-@view_defaults(route_name='round_trip_time_query', renderer='json')
-class RoundTripTimeQuery(object):
-
-    """
-    A class-based view for querying the round trip time in a given range.
-    """
-
-    def __init__(self, request):
-        """
-        Initialises the instance of the view with the request argument.
-
-        :param request: client's call request
-        """
-
-        self.request = request
-
-    def get(self):
-        """
-        A GET API call for the averaged round trip time of a specific media service over a given time range.
-
-        :return: A JSON response with the round trip time and its contributing parts.
-        """
-
-        params = {}
-        for attribute in ROUND_TRIP_ATTRIBUTES:
-            if attribute in self.request.params:
-                params[attribute] = self.request.params.get(attribute)
-
-        try:
-            params = validate_round_trip_query_params(params)
-            config_data = {config_attribute: self.request.registry.settings.get(config_attribute) for config_attribute in CONFIG_ATTRIBUTES}
-
-            media_service = params.get(ROUND_TRIP_ATTRIBUTES[0])
-            start_timestamp = params.get(ROUND_TRIP_ATTRIBUTES[1])
-            end_timestamp = params.get(ROUND_TRIP_ATTRIBUTES[2])
-            influx_db_name = config_data.get(CONFIG_ATTRIBUTES[1])
-            influx_db_url = config_data.get(CONFIG_ATTRIBUTES[2])
-
-            url_object = urlparse(influx_db_url)
-            try:
-                db_client = InfluxDBClient(host=url_object.hostname, port=url_object.port, database=influx_db_name, timeout=10)
-                query = 'SELECT mean(*) FROM "{0}"."autogen"."e2e_delays" WHERE time >= {1} and time < {2} and sf_instance = \'{3}\''.format(
-                    influx_db_name, start_timestamp, end_timestamp, media_service)
-                log.info("Executing query: {0}".format(query))
-                result = db_client.query(query)
-
-                actual_result = next(result.get_points(), None)
-                if actual_result is None:
-                    return {"result": None}
-                else:
-                    forward_latency = actual_result.get("mean_delay_forward")
-                    reverse_latency = actual_result.get("mean_delay_reverse")
-                    service_delay = actual_result.get("mean_delay_service")
-                    request_size = actual_result.get("mean_avg_request_size")
-                    response_size = actual_result.get("mean_avg_response_size")
-                    bandwidth = actual_result.get("mean_avg_bandwidth")
-
-                    rtt = self.calculate_round_trip_time(forward_latency, reverse_latency, service_delay, request_size, response_size, bandwidth)
-                    return {"result": rtt}
-            except Exception as e:
-                msg = "Cannot instantiate connection with database {0} on url {1}.".format(influx_db_name, influx_db_url)
-                log.info(msg)
-                log.error(type(e))
-                log.error(e)
-                log.error(e.args)
-
-                raise HTTPInternalServerError(msg)
-
-        except AssertionError:
-            raise HTTPBadRequest('Bad request content - must be in JSON format: {"media_service": value, "start_timestamp": value, "end_timestamp": value}.')
-
-    @staticmethod
-    def calculate_round_trip_time(forward_latency, reverse_latency, service_delay, request_size, response_size, bandwidth, packet_size=1500, packet_header_size=50):
-        """
-        Calculates the round trip time given the list of arguments.
-
-        :param forward_latency: network latency in forward direction (s)
-        :param reverse_latency: network latency in reverse direction (s)
-        :param service_delay: media service delay (s)
-        :param request_size: request size (bytes)
-        :param response_size: response size (bytes)
-        :param bandwidth: network bandwidth (Mb/s)
-        :param packet_size: size of packet (bytes)
-        :param packet_header_size: size of the header of the packet (bytes)
-        :return: the calculated round trip time
-        """
-
-        forward_data_delay = (8/10**6) * (request_size / bandwidth) * (packet_size / (packet_size - packet_header_size))
-        reverse_data_delay = (8/10**6) * (response_size / bandwidth) * (packet_size / (packet_size - packet_header_size))
-
-        return forward_latency + forward_data_delay + service_delay + reverse_latency + reverse_data_delay
-
-@view_defaults(route_name='sfemc_config', renderer='json')
-class SFEMCMockConfig(object):
-    """
-    A class-based view for accessing and mutating the configuration of the aggregator.
-    """
-
-    def __init__(self, request):
-        """
-        Initialises the instance of the view with the request argument.
-
-        :param request: client's call request
-        """
-
-        self.request = request
-
-    def get(self):
-        """
-        A GET API call for endpoint configuration.
-
-        :return: A JSON response with the configuration of the aggregator.
-        """
-
-        log.debug("\Getting endpoint configuration\n")
-
-        config = {"key": "hello"}
-
-        return config
-
-    def put(self):
-        """
-        A PUT API call for the status of the aggregator.
-
-        :return: A JSON response to the PUT call - essentially with the new configured data and comment of the state of the aggregator
-        :raises HTTPBadRequest: if request body is not a valid JSON for the configurator
-        """
-
-        log.debug("\Putting endpoint configuration\n")
diff --git a/src/service/clmcservice/whoamiapi/__init__.py b/src/service/clmcservice/whoamiapi/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/src/service/clmcservice/whoamiapi/__init__.py
@@ -0,0 +1 @@
+
diff --git a/src/service/clmcservice/whoamiapi/conftest.py b/src/service/clmcservice/whoamiapi/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a531290eeb89beaebe472a07fe266ca7b608c75
--- /dev/null
+++ b/src/service/clmcservice/whoamiapi/conftest.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          25-06-2018
+//      Created for Project :   FLAME
+"""
+
+import pytest
+from sqlalchemy import create_engine
+from sqlalchemy.exc import ProgrammingError, OperationalError
+from clmcservice.models.meta import DBSession, Base
+
+
+def create_test_database(db_name):
+    """
+    This function creates a test database with the given name. If the database already exists, it is recreated.
+
+    :param db_name: the test database name
+    """
+
+    engine = create_engine("postgresql://clmc:clmc_service@localhost:5432/postgres", echo=False)
+    conn = engine.connect().execution_options(autocommit=False)
+    conn.execute("ROLLBACK")  # connection is already in a transaction, hence roll back (postgres databases cannot be created in a transaction)
+    try:
+        conn.execute("DROP DATABASE %s" % db_name)
+        print("\nOld database '{0}' has been deleted.".format(db_name))
+    except ProgrammingError:
+        # database probably doesn't exist
+        conn.execute("ROLLBACK")
+    except OperationalError as e:
+        print(e)
+        # database exists and is probably being used by other users
+        conn.execute("ROLLBACK")
+        conn.close()
+        engine.dispose()
+        pytest.exit("Old test database cannot be deleted.")  # pytest.exit raises internally, so no re-raise is needed
+
+    conn.execute("CREATE DATABASE %s" % db_name)
+    conn.close()
+    engine.dispose()
+    print("\nNew test database '{0}' has been created.".format(db_name))
+
+
+def initialise_database(db_name):
+    """
+    This function initialises the test database by binding the shared DB session to a new connection engine and creating tables for all models.
+
+    :param db_name: test database name
+    :return: the configured DB session, which is connected to the test database
+    """
+
+    engine = create_engine('postgresql://clmc:clmc_service@localhost:5432/{0}'.format(db_name))  # create an engine to connect to the test database
+    DBSession.configure(bind=engine)  # configure the database session
+    Base.metadata.bind = engine
+    Base.metadata.create_all()  # create tables for all models
+
+    return DBSession, engine
+
+
+def drop_test_database(db_name):
+    """
+    This function removes the test database with the given name, if it exists.
+
+    :param db_name: the test database name
+    """
+
+    engine = create_engine("postgresql://clmc:clmc_service@localhost:5432/postgres", echo=False)
+    conn = engine.connect().execution_options(autocommit=False)
+    conn.execute("ROLLBACK")  # connection is already in a transaction, hence roll back (postgres databases cannot be created in a transaction)
+    try:
+        conn.execute("DROP DATABASE %s" % db_name)
+        print("\nTest database '{0}' has been deleted.".format(db_name))
+    except ProgrammingError:
+        # database probably doesn't exist
+        conn.execute("ROLLBACK")
+    except OperationalError as e:
+        print(e)
+        # database is probably being used by other users
+        conn.execute("ROLLBACK")
+
+    conn.close()
+    engine.dispose()
+
+
+@pytest.fixture(scope='module', autouse=True)
+def testing_db_session():
+
+    test_database = "whoamitestdb"
+    create_test_database(test_database)  # create a database used for executing the unit tests
+    db_session, engine = initialise_database(test_database)  # initialise the database with the models and retrieve a db session
+
+    yield db_session  # return the db session if needed in any of the tests
+
+    db_session.remove()  # remove the db session
+    engine.dispose()  # dispose from the engine
+    drop_test_database(test_database)  # remove the test database
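Because the testing_db_session fixture above is module-scoped and autouse, every test module importing this conftest gets a fresh whoamitestdb database that is dropped again on teardown. A minimal sketch of a test consuming the yielded session (the query itself is illustrative; DBSession is assumed to expose the usual SQLAlchemy scoped-session interface):

from clmcservice.models import ServiceFunctionEndpoint

def test_database_starts_empty(testing_db_session):
    # the fixture yields the shared DBSession, already bound to the test database
    assert testing_db_session.query(ServiceFunctionEndpoint).count() == 0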
diff --git a/src/service/clmcservice/whoamiapi/tests.py b/src/service/clmcservice/whoamiapi/tests.py
new file mode 100644
index 0000000000000000000000000000000000000000..f50886616960d4c6fb8c24425895757057e69525
--- /dev/null
+++ b/src/service/clmcservice/whoamiapi/tests.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          25-06-2018
+//      Created for Project :   FLAME
+"""
+
+import pytest
+from json import dumps
+from pyramid import testing
+from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound, HTTPConflict
+from clmcservice.models import ServiceFunctionEndpoint
+from clmcservice.whoamiapi.views import WhoamiAPI
+
+
+class TestWhoamiAPI(object):
+    """
+    Pytest-based tests for the WHOAMI API endpoints.
+    """
+
+    @pytest.fixture(autouse=True)
+    def app_config(self):
+        """
+        A fixture implementing setUp/tearDown functionality for all tests by initialising the configuration structure for the web service and the database connection.
+        """
+
+        self.registry = testing.setUp()
+
+        yield
+
+        testing.tearDown()
+        ServiceFunctionEndpoint.delete_all()  # clear the instances of the model in the test database
+
+    def test_get_all(self):
+        """
+        Tests the GET all method of the WHOAMI API - returns a list of all service function endpoint configurations from the database.
+        """
+
+        request = testing.DummyRequest()
+        response = WhoamiAPI(request).get_all()
+        assert response == [], "Initially there mustn't be any service function endpoint configurations in the database."
+
+        sf_e = ServiceFunctionEndpoint(location="DC1", sfc="sfc1", sfc_i="sfc_i1", sf="sf1", sf_i="sf_i1", sf_endpoint="sf_endpoint1", sr="sr1")
+        expected_response_data = [sf_e.json]
+        ServiceFunctionEndpoint.add(sf_e)  # adds the new instance of the model to the database
+
+        request = testing.DummyRequest()
+        response = WhoamiAPI(request).get_all()
+        assert response == expected_response_data, "Incorrect response data with 1 service function endpoint configuration."
+
+        sf_e = ServiceFunctionEndpoint(location="DC2", sfc="sfc2", sfc_i="sfc_i2", sf="sf2", sf_i="sf_i2", sf_endpoint="sf_endpoint2", sr="sr2")
+        expected_response_data.append(sf_e.json)
+        ServiceFunctionEndpoint.add(sf_e)
+        sf_e = ServiceFunctionEndpoint(location="DC3", sfc="sfc3", sfc_i="sfc_i3", sf="sf3", sf_i="sf_i3", sf_endpoint="sf_endpoint3", sr="sr3")
+        expected_response_data.append(sf_e.json)
+        ServiceFunctionEndpoint.add(sf_e)
+
+        request = testing.DummyRequest()
+        response = WhoamiAPI(request).get_all()
+        assert response == expected_response_data, "Incorrect response data with more than 1 service function endpoint configurations."
+
+    def test_get_one(self):
+        """
+        Tests the GET one method of the WHOAMI API - returns an instance of a service function endpoint configuration from the database.
+        """
+
+        request = testing.DummyRequest()
+        response = WhoamiAPI(request).get_all()
+        assert response == [], "Initially there mustn't be any service function endpoint configurations in the database."
+
+        self._validation_of_url_parameters_test("get_one")
+
+        sf_e = ServiceFunctionEndpoint(location="DC1", sfc="sfc1", sfc_i="sfc_i1", sf="sf1", sf_i="sf_i1", sf_endpoint="sf_endpoint1", sr="sr1")
+        expected_response_data = sf_e.json
+        ServiceFunctionEndpoint.add(sf_e)  # adds the new instance of the model to the database
+
+        request = testing.DummyRequest()
+        request.params["sf_endpoint"] = "sf_endpoint1"
+        request.params["sf_i"] = "sf_i1"
+        request.params["sr"] = "sr1"
+        response = WhoamiAPI(request).get_one()
+        assert response == expected_response_data, "Invalid data returned in the response of GET instance"
+
+        request = testing.DummyRequest()
+        request.params["sf_endpoint"] = "sf_endpoint2"
+        request.params["sf_i"] = "sf_i2"
+        request.params["sr"] = "sr2"
+        error_raised = False
+        try:
+            WhoamiAPI(request).get_one()
+        except HTTPNotFound:
+            error_raised = True
+        assert error_raised, "Not found error must be raised in case of a non existing service function endpoint"
+
+    def test_post(self):
+        """
+        Tests the POST method of the WHOAMI API - creates an instance of a service function endpoint configuration in the database.
+        """
+
+        request = testing.DummyRequest()
+        response = WhoamiAPI(request).get_all()
+        assert response == [], "Initially there mustn't be any service function endpoint configurations in the database."
+
+        resource = dict(location="DC1", sfc="sfc1", sfc_i="sfc_i1", sf="sf1", sf_i="sf_i1", sf_endpoint="sf_endpoint1", sr="sr1")
+        json_data = dumps(resource)
+        request = testing.DummyRequest()
+        request.body = json_data.encode(request.charset)
+        response = WhoamiAPI(request).post()
+        assert response == resource, "POST request must return the created resource"
+        assert ServiceFunctionEndpoint.exists("sf_i1", "sf_endpoint1", "sr1"), "POST request must have created the resource"
+
+        resource["location"] = "DC2"
+        json_data = dumps(resource)
+        request = testing.DummyRequest()
+        request.body = json_data.encode(request.charset)
+        error_raised = False
+        try:
+            WhoamiAPI(request).post()
+        except HTTPConflict:
+            error_raised = True
+        assert error_raised, "An error must be raised when trying to create a resource which breaks the unique constraint"
+
+    @pytest.mark.parametrize("body, valid", [
+        ('{"location": "DC1", "sfc": "sfc1", "sfc_i": "sfc_i1", "sf": "sf1", "sf_i": "sf_i1", "sf_endpoint": "sf_endpoint1", "sr": "sr1"}', True),
+        ('{"location": "DC2", "sfc": "sfc2", "sfc_i": "sfc_i2", "sf": "sf2", "sf_i": "sf_i2", "sf_endpoint": "sf_endpoint2", "sr": "sr2"}', True),
+        ('{}', False),
+        ('{"location": "DC1", "sfc": "sfc1", "sfc_i": "sfc_i1", "sf": "sf1", "sf_i": "sf_i1"}', False),
+        ('{"place": "DC2", "sfc": "sfc2", "sfc_i": "sfc_i2", "sf": "sf2", "sf_i": "sf_i2", "sf_endpoint": "sf_endpoint2", "sr": "sr2"}', False),
+        ('{invalid json}', False),
+    ])
+    def test_post_body_validation(self, body, valid):
+        """
+        Tests the POST request validation of the body content.
+
+        :param body: The request body to be validated
+        :param valid: True if body is valid, False otherwise
+        """
+
+        request = testing.DummyRequest()
+        request.body = body.encode(request.charset)
+        error_raised = False
+        try:
+            WhoamiAPI(request).post()
+        except HTTPBadRequest:
+            error_raised = True
+        assert error_raised == (not valid), "An error must be raised in case of an invalid request body"
+
+    def test_put(self):
+        """
+        Tests the PUT method of the WHOAMI API - overwrites an instance of a service function endpoint configuration from the database.
+        """
+
+        request = testing.DummyRequest()
+        response = WhoamiAPI(request).get_all()
+        assert response == [], "Initially there mustn't be any service function endpoint configurations in the database."
+
+        self._validation_of_url_parameters_test("put")
+
+        resource = dict(location="location1", sfc="sfc1", sfc_i="sfc_i1", sf="sf1", sf_i="sf_i1", sf_endpoint="sf_endpoint1", sr="sr1")
+        body = dumps(resource)
+        request = testing.DummyRequest()
+        request.params["sf_endpoint"] = "sf_endpoint1"
+        request.params["sf_i"] = "sf_i1"
+        request.params["sr"] = "sr1"
+        request.body = body.encode(request.charset)
+        error_raised = False
+        try:
+            WhoamiAPI(request).put()
+        except HTTPNotFound:
+            error_raised = True
+        assert error_raised, "Not found error must be raised in case of a non existing service function endpoint"
+
+        sf_e = ServiceFunctionEndpoint(location="DC1", sfc="sfc1", sfc_i="sfc_i1", sf="sf1", sf_i="sf_i1", sf_endpoint="sf_endpoint1", sr="sr1")
+        ServiceFunctionEndpoint.add(sf_e)  # adds the new instance of the model to the database
+
+        resource = dict(location="location1", sfc="sfc1", sfc_i="sfc_i1", sf="sf1", sf_i="sf_i1", sf_endpoint="sf_endpoint1", sr="sr1")
+        body = dumps(resource)
+        request = testing.DummyRequest()
+        request.params["sf_endpoint"] = "sf_endpoint1"
+        request.params["sf_i"] = "sf_i1"
+        request.params["sr"] = "sr1"
+        request.body = body.encode(request.charset)
+        response = WhoamiAPI(request).put()
+        assert response == resource, "PUT request must return the updated resource"
+        assert ServiceFunctionEndpoint.get("sf_i1", "sf_endpoint1", "sr1").json["location"] == "location1"
+
+        resource = dict(location="DC1", sfc="sfc1", sfc_i="sfc_i1", sf="sf1", sf_i="sf_i2", sf_endpoint="sf_endpoint2", sr="sr2")
+        body = dumps(resource)
+        request = testing.DummyRequest()
+        request.params["sf_endpoint"] = "sf_endpoint1"
+        request.params["sf_i"] = "sf_i1"
+        request.params["sr"] = "sr1"
+        request.body = body.encode(request.charset)
+        response = WhoamiAPI(request).put()
+        assert response == resource, "PUT request must return the updated resource"
+        assert not ServiceFunctionEndpoint.exists("sf_i1", "sf_endpoint1", "sr1"), "Resource has not been updated"
+        assert ServiceFunctionEndpoint.exists("sf_i2", "sf_endpoint2", "sr2"), "Resource has not been updated"
+
+        sf_e = ServiceFunctionEndpoint(location="DC1", sfc="sfc1", sfc_i="sfc_i1", sf="sf1", sf_i="sf_i1", sf_endpoint="sf_endpoint1", sr="sr1")
+        ServiceFunctionEndpoint.add(sf_e)  # adds the new instance of the model to the database
+
+        resource = dict(location="DC1", sfc="sfc1", sfc_i="sfc_i1", sf="sf1", sf_i="sf_i2", sf_endpoint="sf_endpoint2", sr="sr2")
+        body = dumps(resource)
+        request = testing.DummyRequest()
+        request.params["sf_endpoint"] = "sf_endpoint1"
+        request.params["sf_i"] = "sf_i1"
+        request.params["sr"] = "sr1"
+        request.body = body.encode(request.charset)
+        error_raised = False
+        try:
+            WhoamiAPI(request).put()
+        except HTTPConflict:
+            error_raised = True
+        assert error_raised, "PUT request breaks unique constraint"
+
+    @pytest.mark.parametrize("body, valid", [
+        ('{"location": "DC1", "sfc": "sfc1", "sfc_i": "sfc_i1", "sf": "sf1", "sf_i": "sf_i1", "sf_endpoint": "sf_endpoint1", "sr": "sr1"}', True),
+        ('{"location": "DC2", "sfc": "sfc2", "sfc_i": "sfc_i2", "sf": "sf2", "sf_i": "sf_i2", "sf_endpoint": "sf_endpoint2", "sr": "sr2"}', True),
+        ('{}', False),
+        ('{"location": "DC1", "sfc": "sfc1", "sfc_i": "sfc_i1", "sf": "sf1", "sf_i": "sf_i1"}', False),
+        ('{"place": "DC2", "sfc": "sfc2", "sfc_i": "sfc_i2", "sf": "sf2", "sf_i": "sf_i2", "sf_endpoint": "sf_endpoint2", "sr": "sr2"}', False),
+        ('{invalid json}', False),
+    ])
+    def test_put_body_validation(self, body, valid):
+        """
+        Tests the PUT request validation of the body content.
+
+        :param body: The request body to be validated
+        :param valid: True if body is valid, False otherwise
+        """
+
+        sf_e = ServiceFunctionEndpoint(location="DC1", sfc="sfc1", sfc_i="sfc_i1", sf="sf1", sf_i="sf_i1", sf_endpoint="sf_endpoint1", sr="sr1")
+        ServiceFunctionEndpoint.add(sf_e)  # adds the new instance of the model to the database
+
+        request = testing.DummyRequest()
+        request.params["sf_endpoint"] = "sf_endpoint1"
+        request.params["sf_i"] = "sf_i1"
+        request.params["sr"] = "sr1"
+        request.body = body.encode(request.charset)
+        error_raised = False
+        try:
+            WhoamiAPI(request).put()
+        except HTTPBadRequest:
+            error_raised = True
+        assert error_raised == (not valid), "An error must be raised in case of an invalid request body"
+
+    def test_delete(self):
+        """
+        Tests the DELETE method of the WHOAMI API - deletes an instance of a service function endpoint configuration from the database.
+        """
+
+        request = testing.DummyRequest()
+        response = WhoamiAPI(request).get_all()
+        assert response == [], "Initially there mustn't be any service function endpoint configurations in the database."
+
+        self._validation_of_url_parameters_test("delete")
+
+        sf_e = ServiceFunctionEndpoint(location="DC1", sfc="sfc1", sfc_i="sfc_i1", sf="sf1", sf_i="sf_i1", sf_endpoint="sf_endpoint1", sr="sr1")
+        to_delete = sf_e.json
+        ServiceFunctionEndpoint.add(sf_e)  # adds the new instance of the model to the database
+
+        assert ServiceFunctionEndpoint.exists("sf_i1", "sf_endpoint1", "sr1")
+
+        request = testing.DummyRequest()
+        request.params["sf_endpoint"] = "sf_endpoint1"
+        request.params["sf_i"] = "sf_i1"
+        request.params["sr"] = "sr1"
+        response = WhoamiAPI(request).delete()
+        assert response == to_delete, "DELETE must return the deleted object if successful"
+
+        assert not ServiceFunctionEndpoint.exists("sf_i1", "sf_endpoint1", "sr1"), "Resource must be deleted after the delete API method has been called."
+
+        request = testing.DummyRequest()
+        request.params["sf_endpoint"] = "sf_endpoint1"
+        request.params["sf_i"] = "sf_i1"
+        request.params["sr"] = "sr1"
+        error_raised = False
+        try:
+            WhoamiAPI(request).delete()
+        except HTTPNotFound:
+            error_raised = True
+        assert error_raised, "Not found error must be raised in case of a non existing service function endpoint"
+
+    @staticmethod
+    def _validation_of_url_parameters_test(method):
+        """
+        Validates the way a WHOAMI API method handles URL query parameters.
+
+        :param method: the method to test
+        """
+
+        request = testing.DummyRequest()
+        error_raised = False
+        try:
+            getattr(WhoamiAPI(request), method)()
+        except HTTPBadRequest:
+            error_raised = True
+        assert error_raised, "Error must be raised in case of no URL parameters"
+
+        request = testing.DummyRequest()
+        request.params["sf_i"] = "sf_i"
+        request.params["sr"] = "sr"
+        error_raised = False  # reset the flag so this assertion cannot pass vacuously
+        try:
+            getattr(WhoamiAPI(request), method)()
+        except HTTPBadRequest:
+            error_raised = True
+        assert error_raised, "Error must be raised in case of insufficient number of arguments"
+
+        request = testing.DummyRequest()
+        request.params["sf_endp"] = "sf_endpoint"  # argument should be sf_endpoint
+        request.params["sf_i"] = "sf_i"
+        request.params["sr"] = "sr"
+        error_raised = False  # reset the flag so this assertion cannot pass vacuously
+        try:
+            getattr(WhoamiAPI(request), method)()
+        except HTTPBadRequest:
+            error_raised = True
+        assert error_raised, "Error must be raised in case of invalid naming of arguments"
diff --git a/src/service/clmcservice/whoamiapi/utilities.py b/src/service/clmcservice/whoamiapi/utilities.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd141d6025c333eb4e32485d9004a43444a7844c
--- /dev/null
+++ b/src/service/clmcservice/whoamiapi/utilities.py
@@ -0,0 +1,67 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          25-06-2018
+//      Created for Project :   FLAME
+"""
+
+from json import loads
+from clmcservice.models import ServiceFunctionEndpoint
+
+
+def validate_sfendpoint_body(body):
+    """
+    Validates the request body used to create an endpoint configuration resource in the database.
+
+    :param body: the request body to validate
+    :return: the validated configuration dictionary object
+    :raise AssertionError: if the body is not a valid configuration
+    """
+
+    try:
+        body = loads(body)
+    except (TypeError, ValueError):  # loads raises ValueError for malformed JSON, TypeError for non-string input
+        raise AssertionError("Configuration must be a JSON object.")
+
+    # the database table has one more column which is a UID integer
+    assert len(body) == len(ServiceFunctionEndpoint.__table__.columns) - 1, "Endpoint configuration must contain exactly the required attributes - no more, no fewer."
+
+    # validate that all required attributes are given in the body
+    for attribute in ServiceFunctionEndpoint.required_columns():
+        assert attribute in body, "Required attribute not found in the request content."
+
+    return body
+
+
+def validate_sfendpoint_params(params):
+    """
+    Validates the request parameters to retrieve an endpoint configuration resource from the database.
+
+    :param params: the parameters dictionary to validate
+    :return: the validated parameters
+    :raise AssertionError: for invalid parameters
+    """
+
+    constrained_cols = ServiceFunctionEndpoint.constrained_columns()
+
+    assert len(params) == len(constrained_cols), "Incorrect url query parameters."
+
+    return params
+
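For illustration, validate_sfendpoint_body accepts a JSON object carrying exactly the required columns of the model and rejects anything else with an AssertionError (attribute names as used in the tests above):

from clmcservice.whoamiapi.utilities import validate_sfendpoint_body

valid = '{"location": "DC1", "sfc": "sfc1", "sfc_i": "sfc_i1", "sf": "sf1", "sf_i": "sf_i1", "sf_endpoint": "sf_endpoint1", "sr": "sr1"}'
config = validate_sfendpoint_body(valid)  # returns the parsed dictionary

try:
    validate_sfendpoint_body('{"location": "DC1"}')  # missing required attributes
except AssertionError as error:
    print(error)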
diff --git a/src/service/clmcservice/whoamiapi/views.py b/src/service/clmcservice/whoamiapi/views.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e3a4e650467adb18fbb3b003d051a54dc89dc54
--- /dev/null
+++ b/src/service/clmcservice/whoamiapi/views.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          25-06-2018
+//      Created for Project :   FLAME
+"""
+
+from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPNotFound
+from pyramid.view import view_defaults, view_config
+from clmcservice.models import ServiceFunctionEndpoint
+from clmcservice.whoamiapi.utilities import validate_sfendpoint_body, validate_sfendpoint_params
+
+
+@view_defaults(renderer='json')
+class WhoamiAPI(object):
+    """
+    A class-based view for accessing and mutating the configuration of SF endpoints - namely, the WHOAMI API.
+    """
+
+    def __init__(self, request):
+        """
+        Initialises the instance of the view with the request argument.
+
+        :param request: client's call request
+        """
+
+        self.request = request
+
+    @view_config(route_name='whoami_endpoints', request_method='GET')
+    def get_all(self):
+        """
+        GET API call for all resources.
+
+        :return: A list of all service function endpoint configurations found in the database.
+        """
+
+        return [instance.json for instance in ServiceFunctionEndpoint.query()]
+
+    @view_config(route_name='whoami_endpoints_instance', request_method='GET')
+    def get_one(self):
+        """
+        GET API call for a single resource.
+
+        :return: One service function endpoint configuration instance retrieved from the database by querying the uniquely constrained columns.
+        :raises HTTPBadRequest: if the request parameters are invalid (invalid URL query string)
+        :raises HTTPNotFound: if a resource with the given parameters doesn't exist in the database
+        """
+
+        sf_endpoint = self._get_sf_endpoint_from_url_string()
+        if sf_endpoint is None:
+            raise HTTPNotFound("A service function endpoint with the given parameters doesn't exist.")
+        else:
+            return sf_endpoint.json
+
+    @view_config(route_name='whoami_endpoints', request_method='POST')
+    def post(self):
+        """
+        A POST API call to create a new service function endpoint.
+
+        :return: A JSON response to the POST call, containing the data of the newly created resource
+        :raises HTTPBadRequest: if request body is not a valid JSON for the configuration
+        :raises HTTPConflict: if the unique constraints are not preserved after the creation of a new instance
+        """
+
+        # create an instance of the model and add it to the database table
+        sf_endpoint = self._validate_and_create()
+        json_data = sf_endpoint.json
+        ServiceFunctionEndpoint.add(sf_endpoint)
+
+        self.request.response.status = 201
+
+        return json_data
+
+    @view_config(route_name='whoami_endpoints_instance', request_method='PUT')
+    def put(self):
+        """
+        A PUT API call to update a service function endpoint.
+
+        :return: A JSON response representing the updated object
+        :raises HTTPBadRequest: if the request parameters are invalid (invalid URL query string)
+        :raises HTTPNotFound: if a resource with the given parameters doesn't exist in the database
+        """
+
+        sf_endpoint = self._get_sf_endpoint_from_url_string()
+        if sf_endpoint is None:
+            raise HTTPNotFound("A service function endpoint with the given parameters doesn't exist.")
+        else:
+            try:
+                body = self.request.body.decode(self.request.charset)
+                validated_body = validate_sfendpoint_body(body)  # validate the content and receive a json dictionary object
+            except AssertionError as e:
+                raise HTTPBadRequest("Bad request content. Configuration format is incorrect: {0}".format(e.args))
+
+            new_resource = validated_body
+            old_resource = sf_endpoint.json
+            updating = new_resource["sf_i"] == old_resource["sf_i"] and new_resource["sf_endpoint"] == old_resource["sf_endpoint"] and new_resource["sr"] == old_resource["sr"]
+
+            if updating:
+                ServiceFunctionEndpoint.delete(sf_endpoint)
+                new_sf_endpoint = ServiceFunctionEndpoint(**validated_body)
+                ServiceFunctionEndpoint.add(new_sf_endpoint)
+            else:
+                resource_exists = ServiceFunctionEndpoint.exists(new_resource["sf_i"], new_resource["sf_endpoint"], new_resource["sr"])
+                if resource_exists:
+                    raise HTTPConflict("Service function endpoint with this configuration already exists.")  # error 409 in case of resource conflict
+
+                new_sf_endpoint = ServiceFunctionEndpoint(**validated_body)
+                ServiceFunctionEndpoint.replace(sf_endpoint, new_sf_endpoint)
+
+            return validated_body
+
+    @view_config(route_name='whoami_endpoints_instance', request_method='DELETE')
+    def delete(self):
+        """
+        Deletes an instance of a service function endpoint configuration in the database.
+
+        :return: A content of the object that has been deleted
+        :raises HTTPBadRequest: if the request parameters are invalid (invalid URL query string)
+        :raises HTTPNotFound: if a resource with the given parameters doesn't exist in the database
+        """
+
+        sf_endpoint = self._get_sf_endpoint_from_url_string()
+        if sf_endpoint is None:
+            raise HTTPNotFound("A service function endpoint with the given parameters doesn't exist.")
+        else:
+            deleted = sf_endpoint.json
+            ServiceFunctionEndpoint.delete(sf_endpoint)
+            return deleted
+
+    def _get_sf_endpoint_from_url_string(self):
+        """
+        Retrieves a service function endpoint configuration from the database by validating and then using the request url parameters.
+
+        :return: An instance of a service function endpoint configuration or None if not existing
+        """
+
+        params = {}
+        for attribute in ServiceFunctionEndpoint.constrained_columns():
+            if attribute in self.request.params:
+                params[attribute] = self.request.params.get(attribute)
+
+        try:
+            params = validate_sfendpoint_params(params)
+        except AssertionError as e:
+            raise HTTPBadRequest("Request format is incorrect: {0}".format(e.args))
+
+        sf_endpoint = ServiceFunctionEndpoint.get(**params)
+        return sf_endpoint
+
+    def _validate_and_create(self):
+        """
+        Validates the request body and checks if a resource with the given attributes already exists.
+
+        :return: a new instance of the model, if the resource doesn't exist
+        :raises HTTPBadRequest: if request body is not a valid JSON for the configuration
+        :raises HTTPConflict: if the unique constraints are not preserved after the creation of a new instance
+        """
+
+        try:
+            body = self.request.body.decode(self.request.charset)
+            validated_body = validate_sfendpoint_body(body)  # validate the content and receive a json dictionary object
+        except AssertionError as e:
+            raise HTTPBadRequest("Bad request content. Configuration format is incorrect: {0}".format(e.args))
+
+        resource = validated_body
+
+        resource_exists = ServiceFunctionEndpoint.exists(resource["sf_i"], resource["sf_endpoint"], resource["sr"])
+        if resource_exists:
+            raise HTTPConflict("Service function endpoint with this configuration already exists.")  # error 409 in case of resource conflict
+
+        # create an instance of the model
+        sf_endpoint = ServiceFunctionEndpoint(**resource)
+
+        return sf_endpoint
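A client's view of the API added above, sketched with the requests library; the URL pattern behind the whoami_endpoints route is configured elsewhere in the service, so the path below is an assumption, while the port comes from the .ini files in this changeset:

import requests

BASE = "http://localhost:9080/whoami/endpoints"  # route path is an assumption

resource = {"location": "DC1", "sfc": "sfc1", "sfc_i": "sfc_i1",
            "sf": "sf1", "sf_i": "sf_i1", "sf_endpoint": "sf_endpoint1", "sr": "sr1"}

created = requests.post(BASE, json=resource)  # 201 on success, 409 on a unique-constraint conflict
print(created.status_code, created.json())

print(requests.get(BASE).json())  # list of all endpoint configurations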
diff --git a/src/service/development.ini b/src/service/development.ini
index 9345526c2834678369a156178b49e5bad39d6c56..3d14f33fa23e32ac0ec50d839efcdcf4d080704f 100644
--- a/src/service/development.ini
+++ b/src/service/development.ini
@@ -14,10 +14,21 @@ pyramid.default_locale_name = en
 pyramid.includes = pyramid_debugtoolbar pyramid_exclog
 exclog.ignore =
 
-## Aggregator default configuration
-aggregator_report_period = 5
-aggregator_database_name = CLMCMetrics
-aggregator_database_url = http://172.40.231.51:8086
+# Configuration file path
+configuration_file_path = /etc/flame/clmc/service.conf
+
+network_configuration_path = /vagrant/src/service/network_config.json
+
+# PostgreSQL connection url
+sqlalchemy.url = postgresql://clmc:clmc_service@localhost:5432/whoamidb
+
+# Influx connection
+influx_host = localhost
+influx_port = 8086
+
+# Neo4j connection
+neo4j_host = localhost
+neo4j_password = admin
 
 # By default, the toolbar only appears for clients from IP addresses
 # '127.0.0.1' and '::1'.
@@ -37,7 +48,7 @@ listen = localhost:9080
 ###
 
 [loggers]
-keys = root, exc_logger, clmcservice
+keys = root, exc_logger, service_logger, sqlalchemy.engine.base.Engine
 
 [handlers]
 keys = console, filelog, exc_handler
@@ -49,7 +60,12 @@ keys = generic, exc_formatter
 level = INFO
 handlers = console
 
-[logger_clmcservice]
+[logger_sqlalchemy.engine.base.Engine]
+level = INFO
+handlers =
+qualname = sqlalchemy.engine.base.Engine
+
+[logger_service_logger]
 level = INFO
 handlers = filelog
 qualname = service_logger
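The sqlalchemy.url key added above uses the standard postgresql:// DSN form consumed by SQLAlchemy/psycopg2. A sketch of how a Pyramid application factory might bind an engine from these settings (the service's actual wiring is not part of this diff):

from sqlalchemy import engine_from_config
from clmcservice.models.meta import DBSession, Base

def bind_database(settings):
    # engine_from_config reads every key prefixed with 'sqlalchemy.'
    engine = engine_from_config(settings, prefix="sqlalchemy.")
    DBSession.configure(bind=engine)
    Base.metadata.bind = engine
    return engine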
diff --git a/src/service/network_config.json b/src/service/network_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c8ca7cb724ffe57a1d8a62d28b7b845460445676
--- /dev/null
+++ b/src/service/network_config.json
@@ -0,0 +1,47 @@
+{
+  "bandwidth": 104857600,
+  "links": [
+    {
+      "source": "DC1",
+      "target": "DC2",
+      "min_response_time": 10,
+      "max_response_time": 20,
+      "avg_response_time": 15
+    },
+    {
+      "source": "DC2",
+      "target": "DC1",
+      "min_response_time": 16,
+      "max_response_time": 28,
+      "avg_response_time": 22
+    },
+    {
+      "source": "DC1",
+      "target": "DC3",
+      "min_response_time": 17,
+      "max_response_time": 19,
+      "avg_response_time": 18
+    },
+    {
+      "source": "DC3",
+      "target": "DC1",
+      "min_response_time": 15,
+      "max_response_time": 25,
+      "avg_response_time": 20
+    },
+    {
+      "source": "DC2",
+      "target": "DC3",
+      "min_response_time": 11,
+      "max_response_time": 29,
+      "avg_response_time": 20
+    },
+    {
+      "source": "DC3",
+      "target": "DC2",
+      "min_response_time": 12,
+      "max_response_time": 40,
+      "avg_response_time": 26
+    }
+  ]
+}
\ No newline at end of file
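The static network configuration above (bandwidth and per-link response times; the units are an assumption based on the magnitudes) can be consumed directly, for example:

import json

# path as configured by network_configuration_path in the .ini files above
with open("/vagrant/src/service/network_config.json") as f:
    network = json.load(f)

def avg_response_time(source, target):
    # linear scan is fine for a handful of directed links
    return next((link["avg_response_time"] for link in network["links"]
                 if link["source"] == source and link["target"] == target), None)

print(avg_response_time("DC1", "DC2"))  # 15
print(network["bandwidth"])             # 104857600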
diff --git a/src/service/production.ini b/src/service/production.ini
index 2e1cfcf66101b9eec64b22d8a71c2bd72b092961..33c7ca6cb032aceffb2ac12e21af9416ab1624d2 100644
--- a/src/service/production.ini
+++ b/src/service/production.ini
@@ -14,10 +14,21 @@ pyramid.default_locale_name = en
 pyramid.includes = pyramid_exclog
 exclog.ignore =
 
-## Aggregator default configuration
-aggregator_report_period = 5
-aggregator_database_name = CLMCMetrics
-aggregator_database_url = http://172.40.231.51:8086
+# Configuration file path
+configuration_file_path = /etc/flame/clmc/service.conf
+
+network_configuration_path = /vagrant/src/service/network_config.json
+
+# PostgreSQL connection url
+sqlalchemy.url = postgresql://clmc:clmc_service@localhost:5432/whoamidb
+
+# Influx connection
+influx_host = localhost
+influx_port = 8086
+
+# Neo4j connection
+neo4j_host = localhost
+neo4j_password = admin
 
 ###
 # wsgi server configuration
@@ -33,7 +44,7 @@ listen = *:9080
 ###
 
 [loggers]
-keys = root, exc_logger, service_logger
+keys = root, exc_logger, service_logger, sqlalchemy.engine.base.Engine
 
 [handlers]
 keys = console, filelog, exc_handler
@@ -45,6 +56,11 @@ keys = generic, exc_formatter
 level = INFO
 handlers = console
 
+[logger_sqlalchemy.engine.base.Engine]
+level = INFO
+handlers =
+qualname = sqlalchemy.engine.base.Engine
+
 [logger_service_logger]
 level = INFO
 handlers = filelog
diff --git a/src/service/setup.py b/src/service/setup.py
index 802f7416861d09a25760c857e16b9898fc8c2243..c3fbf0e617197626ec492eec7553feae7620d31d 100644
--- a/src/service/setup.py
+++ b/src/service/setup.py
@@ -46,25 +46,29 @@ requires = [
     'pyramid_debugtoolbar',
     'pyramid_exclog',
     'waitress',
+    'sqlalchemy',
+    'zope.sqlalchemy',
+    'psycopg2',
     'influxdb',
-    'pytest',
+    'neo4j-driver',
+    'py2neo'
 ]
 
 tests_require = [
-    'WebTest >= 1.3.1',  # py3 compat
-    'pytest-cov',
+    'pytest',
+    'pytest-cov'
 ]
 
 setup(
-    name = "clmcservice",
-    version = get_version("_version.py"),
-    author = "Michael Boniface",
-    author_email = "mjb@it-innovation.soton.ac.uk",
-    description = "FLAME CLMC Service Module",
+    name="clmcservice",
+    version=get_version("_version.py"),
+    author="Michael Boniface",
+    author_email="mjb@it-innovation.soton.ac.uk",
+    description="FLAME CLMC Service Module",
     long_description="FLAME CLMC Service",
-    license = "https://gitlab.it-innovation.soton.ac.uk/FLAME/flame-clmc/blob/integration/LICENSE",
-    keywords = "FLAME CLMC service",
-    url = 'https://gitlab.it-innovation.soton.ac.uk/FLAME/flame-clmc',
+    license="https://gitlab.it-innovation.soton.ac.uk/FLAME/flame-clmc/blob/integration/LICENSE",
+    keywords="FLAME CLMC service",
+    url='https://gitlab.it-innovation.soton.ac.uk/FLAME/flame-clmc',
     packages=find_packages(),
     include_package_data=True,
     install_requires=requires,
@@ -81,5 +85,8 @@ setup(
         'paste.app_factory': [
             'main = clmcservice:main',
         ],
+        'console_scripts': [
+            'initialize_clmcservice_db = clmcservice.initialize_db:main',
+        ]
     },
-)
\ No newline at end of file
+)
diff --git a/src/test/clmctest/monitoring/test_e2eresults.py b/src/test/clmctest/monitoring/test_e2eresults.py
index 97d46db116b66b8cfbd2c086ffca7638765a12ab..9c957d684c677c39539c22570eb328a155a1af16 100644
--- a/src/test/clmctest/monitoring/test_e2eresults.py
+++ b/src/test/clmctest/monitoring/test_e2eresults.py
@@ -23,7 +23,6 @@
 """
 
 import pytest
-import random
 import time
 import requests
 import urllib.parse
@@ -42,8 +41,6 @@ class TestE2ESimulation(object):
         :param e2e_simulator: the simulator for the end-to-end data
         """
 
-        random.seed(0)  # Seed random function so we can reliably test for average queries
-
         # Configure the aggregator through the CLMC service
         influx_url = urllib.parse.urlparse(e2e_simulator.db_url)
         aggregator_control_url = "http://{0}:9080/aggregator/control".format(influx_url.hostname)