diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index c24d77f9cd9a3f6e787fc614eace8a55007c9a13..e2a7098b150f40a07e3a581e4da07665cb624dd8 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -35,8 +35,8 @@ build:tests:
     - python setup.py sdist --dist-dir=$CI_PROJECT_DIR/build
   artifacts:
     paths:
-    - build/clmctest-2.3.1.tar.gz
-    - build/clmcservice-2.3.1.tar.gz
+    - build/clmctest-2.4.0.tar.gz
+    - build/clmcservice-2.4.0.tar.gz
     expire_in: 1 day
 
 test:all:
@@ -50,8 +50,8 @@ test:all:
     - echo "REPO_PASS=${REPO_PASS}" >> $CI_PROJECT_DIR/reporc
     - sudo scripts/test/fixture.sh create -f src/test/clmctest/rspec.json -r $CI_PROJECT_DIR -c all
     - sudo mkdir /var/lib/lxd/containers/test-runner/rootfs/opt/clmc/build
-    - sudo cp build/clmctest-2.3.1.tar.gz /var/lib/lxd/containers/test-runner/rootfs/opt/clmc/build
-    - sudo lxc exec test-runner -- pip3 install /opt/clmc/build/clmctest-2.3.1.tar.gz
+    - sudo cp build/clmctest-2.4.0.tar.gz /var/lib/lxd/containers/test-runner/rootfs/opt/clmc/build
+    - sudo lxc exec test-runner -- pip3 install /opt/clmc/build/clmctest-2.4.0.tar.gz
     - sudo lxc exec test-runner -- pytest -s --tb=short -rfp --pyargs clmctest
   when: on_success      
   
diff --git a/IPRREGISTRY.md b/IPRREGISTRY.md
index bdc19dcdf58c12f447bee02e6ccd4059ebf40236..a935a8585abd4ca86d273e6a2b7336f04a7a518f 100644
--- a/IPRREGISTRY.md
+++ b/IPRREGISTRY.md
@@ -4,35 +4,31 @@ The CLMC depends on 3rd party open source software distributed using approved op
 
 ## Unmodified Libraries
 
-| Library | License | License Link |
-| ------- | ------- | ------------ |
-| pyramid | Repoze Public License (BSD-derived) http://repoze.org/license.html |https://github.com/Pylons/pyramid/blob/master/LICENSE.txt |
-| pyramid_debugtoolbar | Repoze Public License (BSD-derived) http://repoze.org/license.html |	https://github.com/Pylons/pyramid_debugtoolbar/blob/master/LICENSE.txt |
-| pyramid_exclog | Repoze Public License (BSD-derived)  http://repoze.org/license.html |	https://github.com/Pylons/pyramid_exclog/blob/master/LICENSE.txt |
-| plaster_pastedeploy | MIT LICENSE | https://github.com/Pylons/plaster_pastedeploy/blob/master/LICENSE.txt |
-| waitress | ZOPE PUBLIC LICENSE (ZPL) v2.1 | https://github.com/Pylons/waitress/blob/master/LICENSE.txt |
-| sqlalchemy | MIT LICENSE | https://github.com/sqlalchemy/sqlalchemy/blob/master/LICENSE |	
-| zope.sqlalchemy | ZOPE PUBLIC LICENSE (ZPL) v2.1 |https://github.com/zopefoundation/zope.sqlalchemy/blob/master/LICENSE.txt |
-| psycopg2 | GNU LESSER GENERAL PUBLIC LICENSE v3| https://github.com/psycopg/psycopg2/blob/master/LICENSE |
-| influxdb-python | MIT LICENSE	| https://github.com/influxdata/influxdb-python/blob/master/LICENSE |
-| py2neo | APACHE LICENSE v2 | https://github.com/technige/py2neo/blob/py2neo-4.1.3/LICENSE |	
-| pyyaml | MIT LICENSE | https://github.com/yaml/pyyaml/blob/master/LICENSE |	
-| tosca-parser | APACHE LICENSE v2 | https://github.com/openstack/tosca-parser/blob/master/LICENSE |
-| schema | MIT LICENSE | https://github.com/keleshev/schema/blob/master/LICENSE-MIT |	
-| requests | APACHE LICENSE v2 | https://github.com/requests/requests/blob/master/LICENSE |
-| psutil | BSD LICENSE | https://github.com/giampaolo/psutil/blob/master/LICENSE |
-| pytest | MIT LICENSE | https://github.com/pytest-dev/pytest/blob/master/LICENSE |	
-| pytest-cov | MIT LICENSE | https://github.com/pytest-dev/pytest-cov/blob/master/LICENSE |
+| Library | Version | License | License Link |
+| ------- | ------- | ------- | ------------ |
+| pyramid | 1.9.2 | Repoze Public License (BSD-derived) http://repoze.org/license.html | https://github.com/Pylons/pyramid/blob/1.9.2/LICENSE.txt |
+| pyramid_debugtoolbar | 4.5 | Repoze Public License (BSD-derived) http://repoze.org/license.html | https://github.com/Pylons/pyramid_debugtoolbar/blob/4.5/LICENSE.txt |
+| pyramid_exclog | 1.0 | Repoze Public License (BSD-derived)  http://repoze.org/license.html | https://github.com/Pylons/pyramid_exclog/blob/1.0/LICENSE.txt |
+| plaster_pastedeploy | 0.6 | MIT LICENSE | https://github.com/Pylons/plaster_pastedeploy/blob/0.6/LICENSE.txt |
+| waitress | 1.1.0 | ZOPE PUBLIC LICENSE (ZPL) v2.1 | https://github.com/Pylons/waitress/blob/v1.1.0/LICENSE.txt |
+| influxdb-python | 5.2.0 | MIT LICENSE	| https://github.com/influxdata/influxdb-python/blob/v5.2.0/LICENSE |
+| py2neo | 4.2.0 | APACHE LICENSE v2 | https://github.com/technige/py2neo/blob/v4.2/LICENSE |	
+| pyyaml | 3.13 | MIT LICENSE | https://github.com/yaml/pyyaml/blob/3.13/LICENSE |	
+| tosca-parser | 1.1.0 | APACHE LICENSE v2 | https://github.com/openstack/tosca-parser/blob/1.1.0/LICENSE |
+| schema | 0.6.8 | MIT LICENSE | https://github.com/keleshev/schema/blob/v0.6.8/LICENSE-MIT |	
+| requests | 2.21.0 | APACHE LICENSE v2 | https://github.com/psf/requests/blob/v2.21.0/LICENSE |
+| psutil | 5.6.1 | BSD 3-Clause LICENSE | https://github.com/giampaolo/psutil/blob/release-5.6.1/LICENSE |
+| pytest | 3.8.1 | MIT LICENSE | https://github.com/pytest-dev/pytest/blob/3.8.1/LICENSE |	
+| pytest-cov | 2.6.0 | MIT LICENSE | https://github.com/pytest-dev/pytest-cov/blob/v2.6.0/LICENSE |
 			
 ## Unmodified Service Prerequisites
 
 The following services need to be installed by the recipient as prerequisites for the CLMC
 
-| Library | License | License Link |
-| ------- | ------- | ------------ |
-| InfluxDB | MIT LICENSE | https://github.com/influxdata/influxdb/blob/master/LICENSE |
-| Chronograf | GNU AFFERO GENERAL PUBLIC LICENSE v3 |  https://github.com/influxdata/chronograf/blob/master/LICENSE |
-| Kapacitor | MIT LICENSE | https://github.com/influxdata/kapacitor/blob/master/LICENSE |
-| Neo4j | GNU GENERAL PUBLIC LICENSE v3 | https://neo4j.com/licensing/ |			
-| PostgreSQL | PostgreSQL License | https://www.postgresql.org/about/licence/ |
-			
+| Library | Version | License | License Link |
+| ------- | ------- | ------- | ------------ |
+| InfluxDB | 1.6.5 | MIT LICENSE | https://github.com/influxdata/influxdb/blob/v1.6.5/LICENSE |
+| Chronograf | 1.4.4.2 | GNU AFFERO GENERAL PUBLIC LICENSE v3 |  https://github.com/influxdata/chronograf/blob/1.4.4.2/LICENSE |
+| Kapacitor | 1.4.1 | MIT LICENSE | https://github.com/influxdata/kapacitor/blob/v1.4.1/LICENSE |
+| Neo4j | 3.4.0 | GNU GENERAL PUBLIC LICENSE v3 | https://neo4j.com/licensing/ |
+| Nginx | latest | BSD 2-Clause LICENSE | http://nginx.org/LICENSE |
diff --git a/docs/clmc-development-guide.md b/docs/clmc-development-guide.md
new file mode 100644
index 0000000000000000000000000000000000000000..cf8b7e95ad3399dcaa07fb9f2dfee78303fb8c2c
--- /dev/null
+++ b/docs/clmc-development-guide.md
@@ -0,0 +1,367 @@
+<!--
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          05-08-2018
+//      Created for Project :   FLAME
+-->
+
+# **FLAME CLMC Development Guide**
+
+### **Authors**
+
+|Authors|Organisation|                    
+|:---:|:---:|  
+|[Nikolay Stanchev](mailto:ns17@it-innovation.soton.ac.uk)|[University of Southampton, IT Innovation Centre](http://www.it-innovation.soton.ac.uk)|
+
+### Description
+
+This document describes the internal implementations of the CLMC and provides guidelines on how to develop, extend and maintain the project.
+
+### Top-level view
+
+CLMC consists of the following software components:
+
+* **InfluxDB** - open-source external implementation - https://github.com/influxdata/influxdb - used for storing time-series data
+* **Chronograf** - open-source external implementation - https://github.com/influxdata/chronograf - used for visualising time-series data
+* **Kapacitor** - open-source external implementation - https://github.com/influxdata/kapacitor - used for managing alerts and trigger notifications
+* **Neo4j** - open-source external implementation - https://github.com/neo4j/neo4j - used for performing graph-based monitoring
+* **Nginx** - open-source external implementation - https://github.com/nginx/nginx - used as a reverse proxy in front of the other CLMC APIs and services
+* **clmc-service** - internal implementation - provides API endpoints for different monitoring/alerting-related features
+
+### Managing and installing software components
+
+* *InfluxDB, Kapacitor, Chronograf* - service version number and an installation script are located in **scripts/clmc-service/install-tick-stack.sh**
+* *Neo4j* - service version number and an installation script are located in **scripts/clmc-service/install-neo4j.sh**
+* *clmc-service* - service installation script located in **scripts/clmc-service/install-clmc-service.sh**
+* *nginx* - service version number and an installation script located in **scripts/clmc-service/install-nginx.sh**
+
+In general, the preferred way to install CLMC will be to use the **scripts/clmc-service/install.sh** script which calls the install scripts mentioned above in the correct order and checks if the correct environment variables have been provided. The required environment variables are:
+
+* *SDN_CONTROLLER_IP* - the IP address for CLMC to use when collecting network data from an SDN controller.
+* *NETWORK_DEPENDENCY* - the systemd dependency after which CLMC should start, e.g. *network.target*
+* *SFEMC_FQDN* - the FQDN for CLMC to use when communicating with the FLAME SFEMC component
+
+After running the install script, there must be 6 systemd services - kapacitor, influxdb, chronograf, neo4j, nginx and flameclmc. These could be managed with *systemctl*:
+
+* `systemctl status flameclmc`
+* `systemctl status influxdb`
+* `systemctl status kapacitor`
+* `systemctl status chronograf`
+* `systemctl status neo4j`
+* `systemctl status nginx`
+
+### Configuration of clmc-service
+
+The clmc-service software component is managed through a systemd unit file which is generated by the installation script and located in **/lib/systemd/system/flameclmc.service**. Thus, the clmc-service can be started/restarted/stopped by running:
+
+* `systemctl start flameclmc`
+* `systemctl restart flameclmc`
+* `systemctl stop flameclmc`
+
+In addition, the installation script generates a start script (this is what the systemd service executes to start the CLMC service). This is located in **/opt/flame/clmc/start.sh**. Through this script, the service could be further reconfigured with the following environment variables:
+
+* **SFEMC_FQDN** - defaults to what was used during installation.
+* **SDN_CONTROLLER_IP** - defaults to what was used during installation.
+* **SFEMC_PORT** - the port number the CLMC service uses when communicating with the FLAME SFEMC component, defaults to 8080.
+* **SDN_CONTROLLER_PORT** - the port number the CLMC service uses when communicating with the SDN controller, defaults to 8080.
+
+To reconfigure any of the environment variables above, add/edit an export line in the start script, for example:
+
+`export SFEMC_PORT=5000`
+
+The internal configuration of clmc-service (e.g. how to connect to Kapacitor) is managed through **{production/development}.ini** files. For example, the following configuration variables control how the connection with Kapacitor is established:
+
+* *kapacitor_host*
+* *kapacitor_port*
+
+These files can be found at **src/service/production.ini** and **src/service/development.ini**. To change configuration at run-time, edit the **src/service/production.ini** and restart the clmc-service with:
+
+`systemctl restart flameclmc`
+
+### CLMC services default configuration
+
+Currently, everything is deployed on the same server, therefore, clmc-service uses *localhost* to connect to any of its prerequisites.
+
+* **InfluxDB** - listens on its default port 8086
+* **Chronograf** - listens on its default port 8888
+* **Kapacitor** - listens on its default port 9092
+* **Neo4j** (uses protocols HTTP and Bolt) - HTTP API listens on default port 7474, Bolt API listens on default port 7687 
+* **clmc-service** - listens on port 9080
+
+To simplify connections to CLMC, a reverse proxy (nginx) is installed during deployment with the following configuration:
+
+* proxy listening on port 80
+* URLs starting with **/kapacitor** are forwarded to Kapacitor without altering the request
+* URLs starting with **/influxdb** are forwarded to InfluxDB by stripping the */influxdb* part of the request URL
+* URLs starting with **/chronograf** are forwarded to Chronograf without altering the request (Chronograf is configured to use base path */chronograf*)
+* URLs starting with **/neo4j** are forwarded to Neo4j by stripping the */neo4j* part of the request URL
+* URLs starting with **/clmc-service** are forwarded to the CLMC service by stripping the */clmc-service* part of the request URL
+
+Each service manages its own logging software:
+
+**InfluxDB** - `journalctl -fu influxdb`
+
+**Chronograf** - `journalctl -fu chronograf`
+
+**Kapacitor** - `tail -f /var/log/kapacitor/kapacitor.log`
+
+**Neo4j** - `tail -f /var/log/neo4j/debug.log`
+
+**Nginx**
+
+* `tail -f /var/log/nginx/access.log` - requests log file
+* `tail -f /var/log/nginx/error.log` - errors log file
+
+**clmc-service**
+
+* `tail -f /var/log/flame/clmc/service.log` - general service logging while handling API requests
+* `tail -f /var/log/flame/clmc/service-exceptions.log` - logging of unexpected errors while handling API requests
+* `journalctl -fu flameclmc` - logging of all the above plus logging from the graph-based monitoring script
+
+The logging configuration of the CLMC service can be found at **src/service/production.ini** and **src/service/development.ini**. Defaults to rotating file-based logging with 5 backup files and each file configured to store 40MB at most.
+
+### CLMC project structure
+
+**docs/** - any documentation related to CLMC can be found in this folder, some important files:
+
+* docs/AlertsSpecification.md - the documentation for the TOSCA alerts configuration
+* docs/clmc-service.md - the documentation of the CLMC service API endpoints
+* docs/graph-monitoring-user-guide.md - the user guide for using graph-based monitoring of CLMC
+* docs/total-service-request-delay.md - calculation model of the CLMC graph API for estimating round-trip time of a service
+* docs/clmc-information-model.md - documentation on the CLMC information model
+
+**scripts/clmc-agent** - scripts and configuration files for installing Telegraf on service functions.
+
+**scripts/clmc-service** - installation scripts for CLMC including a reverse proxy configuration - *nginx.conf*, and the graph monitoring script - *graph-pipeline.sh*
+
+**scripts/test/fixture.sh** - a script for setting up and tearing down the CLMC test environment
+
+**src/service** - the source code implementation of the CLMC service including tests for the API features
+
+**src/test** - integration tests which require the full CLMC test environment
+
+### CLMC service implementation details
+
+The CLMC service is implemented in *Python* using the web framework called **Pyramid**. All source code, tests and configuration files of the service can be found in the **src/service** folder.
+
+In general, the structure follows a typical python package structure, with the root package being called **clmcservice** and a setup.py provided on the same folder level. Python dependencies (external python packages) are declared in two places:
+
+* **setup.py** file located at *src/service/setup.py* - defines the explicit Python dependencies, that is the external packages that are used within the CLMC service source code
+* **requirements.txt** file located at *src/service/requirements.txt* - defines the full list of Python dependencies (both explicit and implicit) - these are all the packages that are used either within the CLMC source code or are used by external packages that the CLMC service depends on
+
+To manage these dependencies, we use a Python virtual environment (through *virtualenv* and *virtualenvwrapper*). The virtual environment is created by the installation script but in case it needs to be manually created:
+
+```bash
+export WORKON_HOME=$HOME/.virtualenvs
+source /usr/local/bin/virtualenvwrapper.sh
+mkvirtualenv CLMC
+```
+
+If the virtual environment has been already created, run the following to get into it:
+
+```bash
+export WORKON_HOME=$HOME/.virtualenvs
+source /usr/local/bin/virtualenvwrapper.sh
+workon CLMC
+```
+
+Then to replicate the Python environment for the CLMC service, use the *requirements.txt* file which has all the external packages (explicit and implicit) with a fixed version number:
+
+```bash
+cd src/service
+pip3 install -r requirements.txt
+```
+
+Finally, to install the CLMC service python package, run the following:
+
+```bash
+cd src/service
+pip3 install .
+```
+
+Once the *clmcservice* package has been installed, the *Pyramid* application can be started with the *waitress* application server by using either of the *production.ini* or *development.ini* files.
+
+```bash
+pserve src/service/production.ini
+```
+
+All of the instructions above are executed by the clmc-service install script.
+
+The source code is organised in various python subpackages. Each subpackage is the implementation of a given API (including unit and local integration tests) with the exception of the **models** package which is responsible for things like object relational mappings.
+
+* **src/service/clmcservice/alertsapi** - the source code of the CLMC alerts API used for managing alerts and trigger notifications
+
+* **src/service/clmcservice/graphapi** - the source code of the CLMC graph API used for calculating round-trip time and performing graph-based measurements
+
+* **src/service/clmcservice/managementapi** - the source code of the CLMC data management API used for managing (e.g. deleting) the data of a service function chain 
+
+* **src/service/clmcservice/models** - package for any persistency related code (e.g. object relational mappings)
+
+* **src/service/clmcservice/static** - static files that are required by the CLMC service, e.g. the TOSCA alerts definitions file
+
+* **src/service/clmcservice/tweens.py** - tweens a.k.a. middlewares, code that is executed for each request before it is processed by its respective API function
+
+* **src/service/clmcservice/\_\_init\_\_.py** - the entry point of the CLMC service, **this is where tweens (middlewares) and API routes are registered**
+
+Additional files that are not part of the Python package implementation but are used as resources by the CLMC services:
+
+* **src/service/resources/GraphAPI** - static mappings between the IP address of a service router to the name of an emulated UE or a cluster; these files are used as a temporary solution until we have a solid approach of retrieving this information from the SFR (Service Function Routing) component of FLAME
+
+* **src/service/resources/TICKscript** - TICK script implementations for Kapacitor task templates that are used by CLMC to configure alerts and trigger notifications
+
+* **src/service/resources/tosca** - TOSCA test data, including valid/invalid alerts configuration and resource specification files
+
+### Releasing a new version of CLMC service
+
+Ultimately the process of releasing a new CLMC version is achieved by creating a merge request to the **integration** branch of the **flame-clmc** project. The description of the merge request should act as a CHANGELOG for the new version and the title should specify the new version number.
+
+In addition, there are a few files which must be updated with the new CLMC version number before submitting the merge request:
+
+* **src/service/VERSION** (line 1) - ensures the clmcservice python package is installed with the correct CLMC version number
+
+* **src/test/VERSION** (line 1) - ensures the clmctest python package is installed with the correct CLMC version number
+
+* **.gitlab-ci.yml** (lines 38, 39, 53 and 54) - ensures the gitlab CI runs with the correct version numbers
+
+### Updating the list of CLMC dependencies
+
+* Service prerequisites - if a new service is used in the CLMC implementation, the **IPRREGISTRY.md** must be updated by adding the service and its LICENSE (license type and link) to the list of **Unmodified Service Prerequisites**
+
+* Python dependencies - if a new python package is used in the CLMC service implementation, the **src/service/setup.py**, **src/service/requirements.txt** and **IPRREGISTRY.md** files must be updated
+
+    * **setup.py** - add the package name and package version in the list of required packages to install 
+    
+    * **IPRREGISTRY.md** - add the package name, license type and license link to the list of **Unmodified Libraries**
+    
+    * **requirements.txt** - generate a new requirements.txt file which must include the new package as well as its package dependencies with fixed version numbers
+
+### How to generate a requirements.txt file for replicating the Python virtual environment
+
+To generate the full list of packages in the Python virtual environment, simply run:
+
+```bash
+export WORKON_HOME=$HOME/.virtualenvs
+source /usr/local/bin/virtualenvwrapper.sh
+workon CLMC
+pip3 freeze > requirements.txt
+```
+
+Then, by running the command below, *pip* will install all packages going line by line through the **requirements.txt** file.
+
+```bash
+pip3 install -r requirements.txt
+``` 
+
+However, the output list from the command above is not sorted in a way that packages which depend on other packages are placed (and later installed) after their dependencies. This could lead to incorrect package versions being installed (not every python package fixes the versions of its dependencies) which might later turn to be a dependency conflict between different packages. To avoid this issue, we fix the versions of every package in the Python virtual environment (explicit and implicit) and generate an ordered list so that a package is only installed once all its dependencies are installed. To achieve this, we use two utilities:
+
+* **pipdeptree** https://pypi.org/project/pipdeptree/ - used to generate a tree-like list showing package dependencies (explicit and implicit)
+
+* custom utility script located at **scripts/clmc-service/dependencies_util.py** - used to sort the output of **pipdeptree** so that packages are only listed after all of their dependencies have been listed 
+
+The procedure to generate a properly sorted **requirements.txt** file is the following:
+
+```bash
+# go into the CLMC container
+lxc exec clmc-service -- bash
+# switch to the CLMC Python virtual environment
+export WORKON_HOME=$HOME/.virtualenvs
+source /usr/local/bin/virtualenvwrapper.sh
+workon CLMC
+# install pipdeptree if not installed already
+pip3 install pipdeptree
+# check output of pipdeptree (--reverse flag is used, so that the tree shows leaf packages first and the packages that require them underneath)
+pipdeptree --reverse
+# run the utility script with the output of pipdeptree as input
+cd /opt/clmc/scripts/clmc-service
+python3 dependencies_util.py "$(pipdeptree --reverse)"
+```
+
+### CLMC test environment
+
+The CLMC test environment is built using:
+
+* **vagrant** and **virtualbox** - to get a Unix-based environment on a Windows OS
+* **lxd** - to manage containerized deployments of a number of simple service functions as well as a CLMC deployment
+
+With the provided Vagrantfile in the **flame-clmc** project, simply run the following to start and get access to the virtual machine:
+
+```bash
+vagrant up
+vagrant ssh
+```
+
+Note: Virtualbox and vagrant must be installed. In addition, the vagrant disk size plugin is also required.
+
+```bash
+vagrant plugin install vagrant-disksize
+```
+
+Vagrant is configured to synchronise the VM's */vagrant* folder with the content of the *flame-clmc* project folder.
+
+The LXC containers are controlled with the fixture script located at *scripts/test/fixture.sh*.
+
+```
+Usage: fixture.sh create|start|stop|destroy [-f config_file] [-r repo_root] [-c container_name|all]
+```
+
+The test environment is defined in the configuration file located at *src/test/clmctest/rspec.json*
+
+An example on how to create the clmc-service container:
+
+```bash
+/vagrant/scripts/test/fixture.sh create -f /vagrant/src/test/clmctest/rspec.json -c clmc-service
+```
+
+The installation of several of the services depends on accessing the Nexus binary repository (for the custom Telegraf agent). To do this, a username and password for the repository must be specified in a `reporc` file in the user's home directory, e.g.
+
+```bash
+REPO_USER=itinnov.flame.integration
+REPO_PASS=xxxxxxx
+```
+
+An example on how to create the full test environment with all containers:
+
+```bash
+sudo su
+/vagrant/scripts/test/fixture.sh create -f /vagrant/src/test/clmctest/rspec.json -c all
+```
+
+The fixture script will fail if any individual service installation fails to install (or fails its tests).
+
+To get access to any of the containers run:
+
+```bash
+lxc exec <container name> -- bash
+```
+
+For example:
+
+```bash
+lxc exec clmc-service -- bash
+```
+
+```bash
+lxc exec test-runner -- bash
+```
+
+All tests are implemented using *Python* and *Pytest*. The integration tests (located at *src/test*) are installed in the **test-runner** container. To execute these, run:
+
+```bash
+lxc exec test-runner -- pytest -s --tb=short -rfp --pyargs /opt/clmc/src/test/clmctest
+```
diff --git a/docs/clmc-service.md b/docs/clmc-service.md
index 02f7f7f7a7afba4df29d629ca8e42820e928365c..29b858167650722440a7ef6af8166b3e21f739f9 100644
--- a/docs/clmc-service.md
+++ b/docs/clmc-service.md
@@ -31,34 +31,19 @@
 
 #### Description
 
-This document describes the CLMC service and its API endpoints. The CLMC service is implemented using the *Python* web framework called **Pyramid**.
-It offers different API endpoints such as GraphAPI for calculating round trip time, CRUD API for service function endpoints 
-configuration data and Alerts API for creating and subscribing to alerts in Kapacitor. All source code, tests and 
-configuration files of the service can be found in the **src/service** folder.
+This document describes the CLMC service and its API endpoints such as the graph API for calculating round trip time and the alerts API for creating and subscribing to alerts in Kapacitor.
 
 #### Notes
+
 * Interacting with *Chronograf* - use `http://<clmc-host>/chronograf`. You will be asked to enter connection details.
-The only input that you need to edit is the *Connection String* - set it to `http://<clmc-host>:8086` and click the
-**Add Source** button.
 
-* Interacting with *Kapacitor* - the Kapacitor HTTP API documentation can be found here: https://docs.influxdata.com/kapacitor/v1.4/working/api/
-Notice that all of the URL paths provided by Kapacitor are already namespaced using base path ***/kapacitor/v1***. Therefore, no other prefix is required
-when interacting with the Kapacitor application running on the clmc container, e.g.  
-`http://<clmc-host>/kapacitor/v1/tasks` 
-as described in the Kapacitor API reference.
+* Interacting with *Kapacitor* - the Kapacitor HTTP API documentation can be found here: https://docs.influxdata.com/kapacitor/v1.4/working/api/. Notice that all of the URL paths provided by Kapacitor are already namespaced using base path ***/kapacitor/v1***. Therefore, no other prefix is required when interacting with the Kapacitor application running on the clmc container, e.g. `http://<clmc-host>/kapacitor/v1/tasks` as described in the Kapacitor API reference.
 
-* Interacting with *InfluxDB* - the InfluxDB HTTP API documentation can be found here: https://docs.influxdata.com/influxdb/v1.5/tools/api/
-In order to interact with the InfluxDB application running on the clmc container, prefix all URL paths in the documentation 
-with **/influxdb**, e.g.  
-`http://<clmc-host>/influxdb/query`
+* Interacting with *InfluxDB* - the InfluxDB HTTP API documentation can be found here: https://docs.influxdata.com/influxdb/v1.5/tools/api/. In order to interact with the InfluxDB application running on the clmc container, prefix all URL paths in the documentation with **/influxdb**, e.g. `http://<clmc-host>/influxdb/query`
 
-* Interacting with *neo4j* - use `http://<clmc-host>/neo4j/browser/`. This will open the neo4j browser, which lets you
-interact with the graph using Cypher queries (if necessary).
+* Interacting with *Neo4j* - use `http://<clmc-host>/neo4j/browser/`. This will open the neo4j browser which lets you interact with the graph using Cypher queries (if necessary).
 
-* Interacting with *clmc-serivce* - the API endpoints listed in the following sections relate to direct interactions with the clmc-service 
-application server (listening on port 9080). If interacting with the clmc container, all of the listed below URIs must be prefixed 
-with **/clmc-service** so that the nginx reverse proxy server (listening on port 80) can forward to requests to the correct application, e.g.  
-`http://<clmc-host>/clmc-service/alerts?sfc={service function chain id}&sfci={service function chain instance id}&policy={policy id}&trigger={trigger id}`.
+* Interacting with *clmc-service* - the API endpoints listed in the following sections relate to direct interactions with the clmc-service application server (listening on port 9080). If interacting with the clmc container, all of the URLs listed below must be prefixed with **/clmc-service** so that the nginx reverse proxy server (listening on port 80) can forward the requests to the correct application, e.g. `http://<clmc-host>/clmc-service/alerts/{service function chain id}`.
 
 ## Alerts API Endpoints
 
@@ -742,196 +727,3 @@ with **/clmc-service** so that the nginx reverse proxy server (listening on port
            "deleted_ues_count": 3
         }
         ```
-
-## CRUD API for service function endpoint configurations
-
-**Note: this API is experimental and is not intended to be used at this stage**
-
-* **GET** ***/whoami/endpoints***
-
-    This API method retrieves all service function endpoint configurations in a JSON format.
-
-    * Response:
-
-        Returns a JSON-formatted response - a list of JSON objects, each object representing a service function endpoint configuration.
-
-    * Response Body Example:
-
-        - No service function endpoint configurations found.
-        ```json
-        []
-        ```
-
-        - Multiple service function endpoint configurations found.
-        ```json
-        [
-          {
-           "location": "location_1",
-           "server": "location_1",
-           "sfc": "sfc_1",
-           "sfc_instance": "sfc_i_1",
-           "sf_package": "sf_1",
-           "sf": "sf_i_1",
-           "sf_endpoint": "sf_endpoint_1"
-           },
-          {
-           "location": "location_2",
-           "server": "location_2",
-           "sfc": "sfc_2",
-           "sfc_instance": "sfc_i_2",
-           "sf_package": "sf_2",
-           "sf": "sf_i_2",
-           "sf_endpoint": "sf_endpoint_2"
-           }
-        ]
-        ```
-
-* **GET** ***/whoami/endpoints/instance?sf_endpoint={sf_endpoint_id}***
-
-    This API method retrieves the uniquely defined service function endpoint configuration associated with the given URL parameter - sf_endpoint.
-
-    * Response:
-
-        Returns a JSON-formatted response - a JSON object representing the service function endpoint configuration if it exists.
-
-        Returns a 404 Not Found error if there is no service function endpoint configuration associated with the given URL parameter.
-
-        Returns a 400 Bad Request error if the url parameter is invalid or missing.
-
-    * Response Body Example:
-
-        - Request made to /whoami/endpoints/instance?sf_endpoint=sf_endpoint_1
-        ```json
-          {
-           "location": "location_1",
-           "server": "location_1",
-           "sfc": "sfc_1",
-           "sfc_instance": "sfc_i_1",
-           "sf_package": "sf_1",
-           "sf": "sf_i_1",
-           "sf_endpoint": "sf_endpoint_1"
-          }
-        ```
-
-* **POST** ***/whoami/endpoints***
-
-    This API method creates a new service function endpoint configuration.
-
-    * Request:
-
-        Expects a JSON-formatted request body with the new service function endpoint configuration.
-
-    * Request Body Example:
-
-        ```json
-          {
-           "location": "location_1",
-           "server": "location_1",
-           "sfc": "sfc_1",
-           "sfc_instance": "sfc_i_1",
-           "sf_package": "sf_1",
-           "sf": "sf_i_1",
-           "sf_endpoint": "sf_endpoint_1"
-          }
-        ```
-
-    * Response
-
-        Returns a JSON-formatted response - a JSON object representing the service function endpoint configuration that was created.
-
-        Returns a 400 Bad Request error if the request body is invalid.
-
-        Returns a 409 Conflict error if there exists another service function endpoint configuration with the same 'sf_endpoint' ID.
-
-    * Response Body Example:
-
-        ```json
-          {
-           "location": "location_1",
-           "server": "location_1",
-           "sfc": "sfc_1",
-           "sfc_instance": "sfc_i_1",
-           "sf_package": "sf_1",
-           "sf": "sf_i_1",
-           "sf_endpoint": "sf_endpoint_1"
-          }
-        ```
-
-* **PUT** ***/whoami/endpoints/instance?sf_endpoint={sf_endpoint_id}***
-
-    This API method replaces the uniquely defined service function endpoint configuration associated with the given URL parameter - sf_endpoint, with a new service
-    function endpoint configuration given in the request body (JSON format). It can also be used for updating.
-
-    * Request:
-
-        Expects a JSON-formatted request body with the new service function endpoint configuration.
-
-    * Request Body Example:
-
-        ```json
-          {
-           "location": "location_2",
-           "server": "location_2",
-           "sfc": "sfc_1",
-           "sfc_instance": "sfc_i_1",
-           "sf_package": "sf_1",
-           "sf": "sf_i_1",
-           "sf_endpoint": "sf_endpoint_1"
-          }
-        ```
-
-    * Response
-
-        Returns a JSON-formatted response - a JSON object representing the new service function endpoint configuration that was created (updated).
-
-        Returns a 400 Bad Request error if the request body is invalid.
-
-        Returns a 400 Bad Request error if the url parameter is invalid.
-
-        Returns an 404 Not Found error if there is no service function endpoint configuration associated with the given URL parameter.
-
-        Returns a 409 Conflict error if there exists another service function endpoint configuration with the same 'sf_endpoint' ID as the ones in the request body.
-
-    * Response Body Example:
-
-        - Request made to /whoami/endpoints/instance?sf_endpoint=sf_endpoint_1
-
-        ```json
-          {
-           "location": "location_2",
-           "server": "location_2",
-           "sfc": "sfc_1",
-           "sfc_instance": "sfc_i_1",
-           "sf_package": "sf_1",
-           "sf": "sf_i_1",
-           "sf_endpoint": "sf_endpoint_1"
-          }
-        ```
-
-* **DELETE** ***/whoami/endpoints/instance?sf_endpoint={sf_endpoint_id}***
-
-    This API method deletes the uniquely defined service function endpoint configuration associated with the given URL parameter - sf_endpoint.
-
-    * Response:
-
-        Returns the JSON representation of the deleted object.
-
-        Returns an 404 Not Found error if there is no service function endpoint configuration associated with the given URL parameter.
-
-        Returns a 400 Bad Request error if the url parameter is invalid.
-
-    * Response Body Example:
-
-        - Request made to /whoami/endpoints/instance?sf_endpoint=sf_endpoint_1
-
-        ```json
-          {
-           "location": "location_1",
-           "server": "location_1",
-           "sfc": "sfc_1",
-           "sfc_instance": "sfc_i_1",
-           "sf_package": "sf_1",
-           "sf": "sf_i_1",
-           "sf_endpoint": "sf_endpoint_1"
-          }
-        ```
diff --git a/scripts/clmc-service/dependencies_util.py b/scripts/clmc-service/dependencies_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6f887f0f791814a9b5bb5a55772e560ef645fd6
--- /dev/null
+++ b/scripts/clmc-service/dependencies_util.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python3
+"""
+// © University of Southampton IT Innovation Centre, 2018
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road,
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Nikolay Stanchev
+//      Created Date :          31-07-2019
+//      Created for Project :   FLAME
+"""
+
+#
+# This script could be used to generate the list of requirements (a.k.a. python requirements.txt) in the correct dependency order with fixed versions for all packages. An example would be
+# if CLMC depends on package1 and package2 which both depend on package3 which depends on package4, the requirements order would be package4, package3, package2, package1.
+# The script expects a dependency tree as input which must be produced by the python pipdeptree utility library.
+#
+
+from collections import defaultdict
+
+
+def generate_dependency_mapping(dependency_tree):
+    """
+    Traverses the dependency tree generated by the pipdeptree utility and groups each dependency to a depth level
+
+    :param dependency_tree: the dependency tree generated from the 'pipdeptree --reverse' command
+    :return: a dictionary mapping each dependency to a depth level (depth 0 dependencies must be installed first, depth 1 after depth 0, etc.)
+            e.g. {0: {package1, package2}, 1: {package3}, 2: {package4, package5, package6}}
+    """
+
+    dependency_depths = defaultdict(set)
+    found = {}
+
+    # traverse each line
+    for line in dependency_tree.split("\n"):
+        if line == "":
+            continue  # ignore empty lines
+
+        # each line represents a dependency package, the number of spaces in the beginning determines the depth of this dependency
+        spaces_count = len(line) - len(line.lstrip(" "))
+
+        # spaces are multiples of 2
+        depth = int(spaces_count / 2)
+
+        package_name = line.lstrip(" ")
+        if package_name == "":
+            continue  # ignore lines that consisted of spaces only
+
+        if package_name.startswith("-"):
+            # pipdeptree will output a dash and a space in front of a dependency if the depth is greater than 0
+            package_name = package_name[2:]
+
+            # pipdeptree will also include information on which package requires this dependency, strip that info as well
+            package_name_end_index = package_name.index(" [")
+            package_name = package_name[: package_name_end_index]
+
+        if package_name.startswith("pipdeptree==") or package_name.startswith("pip==") or package_name.startswith("clmcservice=="):
+            continue  # ignore the clmcservice, pipdeptree and pip dependencies (pip is installed anyway before installing the generated list of requirements)
+
+        # if this is a new package, just add it to the appropriate depth level
+        if package_name not in found:
+            dependency_depths[depth].add(package_name)
+            found[package_name] = depth
+        # if package name already seen, and the current depth is greater than the previous depth, add it to the current depth and remove it from previous depth packages
+        else:
+            previous_depth = found[package_name]
+            if depth > previous_depth:
+                dependency_depths[previous_depth].remove(package_name)
+                dependency_depths[depth].add(package_name)
+                found[package_name] = depth
+
+    return dependency_depths
+
+
+def generate_dependency_requirements(dependency_mapping):
+    """
+    Generates a list of requirements in the order that they have to be installed.
+
+    :param dependency_mapping: a mapping from a depth level to a set of packages.
+
+    :return: a string each line of which represents a package to install
+    """
+
+    # find the maximum depth level in the list of dependencies
+    max_depth = max(dependency_mapping.keys())
+
+    # a list to save the output for each depth
+    outputs = []
+
+    # go through each depth and generate the output for the dependencies at this depth
+    for depth in range(0, max_depth+1):
+        if len(dependency_mapping[depth]) == 0:
+            # no packages at this depth level - skip it (do NOT print here, stdout is the generated requirements list)
+            continue
+
+        output = "\n".join(sorted(dependency_mapping[depth]))
+
+        # add the output to the list of output strings
+        outputs.append(output)
+
+    # return the combined output
+    return "\n\n".join(outputs)
+
+
+if __name__ == "__main__":
+
+    # get the command line argument
+    import sys
+    assert len(sys.argv) == 2, "Usage: python3 dependencies_util.py <dependency tree string>"
+    dep_tree = sys.argv[1]
+
+    # generate the ordered list of requirements and print it
+    dep_mapping = generate_dependency_mapping(dep_tree)
+    requirements_list = generate_dependency_requirements(dep_mapping)
+    print(requirements_list)
diff --git a/scripts/clmc-service/install-clmc-service.sh b/scripts/clmc-service/install-clmc-service.sh
index 43fa9f5f716c6eff477174f01cdc026aa7d5c9e0..d1a96e8ba739e72b83036a1a82f93fe25d58820b 100755
--- a/scripts/clmc-service/install-clmc-service.sh
+++ b/scripts/clmc-service/install-clmc-service.sh
@@ -26,13 +26,6 @@
 apt-get update -o Acquire::CompressionTypes::Order::=gz
 apt-get install libssl-dev -y
 
-# Create the database for the WHOAMI API
-apt-get install -y postgresql postgresql-contrib
-sudo -u postgres bash -c "psql -c \"CREATE USER clmc WITH PASSWORD 'clmc_service';\""
-sudo -u postgres bash -c "psql -c \"ALTER USER clmc CREATEDB;\""
-sudo -u postgres createdb whoamidb
-sudo -u postgres bash -c "psql -c \"GRANT ALL PRIVILEGES ON DATABASE \"whoamidb\" to clmc;\""
-
 # install virtualenvwrapper to manage python environments - and check
 echo "----> Installing Python3, Pip3 and curl"
 apt-get install -y python3 python3-pip curl jq
@@ -107,14 +100,6 @@ fi
 echo "----> Creating CLMC web service log directory"
 mkdir -p /var/log/flame/clmc
 
-# initialise the CLMC service database with the model tables
-echo "----> Initialising CLMC database"
-initialize_clmcservice_db production.ini
-if [[ $? -ne 0 ]] ; then
-        echo "Failed: initialising CLMC database"
-		exit 1
-fi
-
 # move the graph pipeline script
 cp ${REPO_ROOT}/scripts/clmc-service/graph-pipeline.sh /usr/local/bin/graph-pipeline.sh
 chmod u+x /usr/local/bin/graph-pipeline.sh
@@ -160,8 +145,3 @@ do
   echo "Waiting for clmc service port 9080 to be ready on localhost..."
   sleep 5
 done
-
-# install and start nginx
-apt-get install nginx -y
-cp ${REPO_ROOT}/scripts/clmc-service/nginx.conf /etc/nginx/nginx.conf
-systemctl restart nginx  # nginx is already started on installation, to read the new conf it needs to be restarted
diff --git a/scripts/clmc-service/install-nginx.sh b/scripts/clmc-service/install-nginx.sh
new file mode 100755
index 0000000000000000000000000000000000000000..8cde80764dddd30012913c4628dd8fe25a49132b
--- /dev/null
+++ b/scripts/clmc-service/install-nginx.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+#/////////////////////////////////////////////////////////////////////////
+#//
+#// (c) University of Southampton IT Innovation Centre, 2018
+#//
+#// Copyright in this software belongs to University of Southampton
+#// IT Innovation Centre of Gamma House, Enterprise Road,
+#// Chilworth Science Park, Southampton, SO16 7NS, UK.
+#//
+#// This software may not be used, sold, licensed, transferred, copied
+#// or reproduced in whole or in part in any manner or form or in or
+#// on any media by any person other than in accordance with the terms
+#// of the Licence Agreement supplied with the software, or otherwise
+#// without the prior written consent of the copyright owners.
+#//
+#// This software is distributed WITHOUT ANY WARRANTY, without even the
+#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+#// PURPOSE, except where stated in the Licence Agreement supplied with
+#// the software.
+#//
+#//      Created By :            Nikolay Stanchev
+#//      Created Date :          05/08/2019
+#//      Created for Project :   FLAME
+#//
+#/////////////////////////////////////////////////////////////////////////
+
+apt-get update -o Acquire::CompressionTypes::Order::=gz
+
+# install and start nginx
+apt-get install nginx -y
+cp ${REPO_ROOT}/scripts/clmc-service/nginx.conf /etc/nginx/nginx.conf
+systemctl restart nginx  # nginx is already started on installation, to read the new conf it needs to be restarted
diff --git a/scripts/clmc-service/install.sh b/scripts/clmc-service/install.sh
index 973e67e55f4c821f1cb8ad04aebd52066110b8d3..6e4ad7902dc4970fe148c40bf94b5fe875dd4945 100755
--- a/scripts/clmc-service/install.sh
+++ b/scripts/clmc-service/install.sh
@@ -24,7 +24,7 @@
 #//
 #/////////////////////////////////////////////////////////////////////////
 
-# Force fail on command fail (off for now as virtualenvwrapper install fails)
+# Force fail on command fail
 set -euo pipefail
 
 if [[ -z "${SFEMC_FQDN}" ]]; then
@@ -32,6 +32,11 @@ if [[ -z "${SFEMC_FQDN}" ]]; then
 	exit 1
 fi
 
+if [[ -z "${SDN_CONTROLLER_IP}" ]]; then
+    echo "Failed: cannot find SDN_CONTROLLER_IP environment variable."
+	exit 1
+fi
+
 if [[ -z "${NETWORK_DEPENDENCY}" ]]; then
     echo "Failed: cannot find NETWORK_DEPENDENCY environment variable."
 	exit 1
@@ -46,4 +51,5 @@ export REPO_ROOT=${REPO_ROOT}
 
 ./install-tick-stack.sh
 ./install-neo4j.sh
-./install-clmc-service.sh
\ No newline at end of file
+./install-clmc-service.sh
+./install-nginx.sh
\ No newline at end of file
diff --git a/src/service/VERSION b/src/service/VERSION
index ae058b3516610f17300d78d120ed958d5dd1e988..faf16644b2ebd4f9c9013ded49ba054d5fe10a28 100644
--- a/src/service/VERSION
+++ b/src/service/VERSION
@@ -1 +1 @@
-__version__ = "2.3.1"
\ No newline at end of file
+__version__ = "2.4.0"
\ No newline at end of file
diff --git a/src/service/clmcservice/__init__.py b/src/service/clmcservice/__init__.py
index 0f2b6a7d323004bdb88ddbf88f0af5785ef688a6..3709054357c589f72345fc92ee283e000a72c204 100644
--- a/src/service/clmcservice/__init__.py
+++ b/src/service/clmcservice/__init__.py
@@ -29,10 +29,6 @@ import os
 
 # PIP installed libs
 from pyramid.config import Configurator
-from sqlalchemy import engine_from_config
-
-# CLMC-service imports
-from clmcservice.models.meta import DBSession, Base
 
 
 ROOT_DIR = dirname(abspath(__file__))  # get the path of the root package (clmcservice) as a global variable
@@ -43,10 +39,6 @@ def main(global_config, **settings):
     This function returns a Pyramid WSGI application.
     """
 
-    engine = engine_from_config(settings, 'sqlalchemy.')  # initialise a database engine by using the 'sqlalchemy' setting in the configuration .ini file
-    DBSession.configure(bind=engine)  # bind the engine to a DB session
-    Base.metadata.bind = engine  # bind the engine to the Base class metadata
-
     settings['sfemc_fqdn'] = os.environ['SFEMC_FQDN']  # read the SFEMC FQDN from the OS environment
     settings['sfemc_port'] = int(os.environ.get('SFEMC_PORT', 8080))  # read the SFEMC port number from the OS environment, if not set use 8080 as default
     settings['sdn_controller_ip'] = os.environ['SDN_CONTROLLER_IP']  # read the SDN controller IP address from the OS environment
@@ -54,6 +46,7 @@ def main(global_config, **settings):
 
     settings['influx_port'] = int(settings['influx_port'])  # the influxdb port setting must be converted to integer instead of a string
     settings['kapacitor_port'] = int(settings['kapacitor_port'])  # the kapacitor port setting must be converted to integer instead of a string
+    settings['neo4j_port'] = int(settings['neo4j_port'])  # the neo4j port setting must be converted to integer instead of a string
     settings['clmc_service_port'] = int(settings['clmc_service_port'])  # the clmc service port setting must be converted to integer instead of a string
 
     settings["network_bandwidth"] = int(settings["network_bandwidth"])  # this is currently not used in the graph RTT calculation model
diff --git a/src/service/clmcservice/alertsapi/views.py b/src/service/clmcservice/alertsapi/views.py
index 4ddbc58ce1f4b10a2858a90145188960f4221853..6c7a28c200b52523c52a4c4f5d08a4e1a89c35a2 100644
--- a/src/service/clmcservice/alertsapi/views.py
+++ b/src/service/clmcservice/alertsapi/views.py
@@ -405,9 +405,6 @@ class AlertsConfigurationAPI(object):
             alert_spec_reference = self.request.POST.get('alert-spec')
             resource_spec_reference = self.request.POST.get('resource-spec')
 
-        # parse the resource specification file and extract the required information
-        resource_spec_sfc, resource_spec_sfc_instance, resource_spec_policy_triggers = self._parse_resource_spec(resource_spec_reference)
-
         # parse the alert specification file into a tosca template (including validation)
         tosca_tpl = self._parse_alert_spec(alert_spec_reference)
 
@@ -418,10 +415,18 @@ class AlertsConfigurationAPI(object):
         sfc = tosca_tpl.tpl["metadata"]["servicefunctionchain"]
         sfc_instance = "{0}_1".format(sfc)
 
-        # do validation between the two TOSCA documents
-        self._compare_alert_and_resource_spec(sfc, sfc_instance, alert_spec_policy_triggers, resource_spec_sfc, resource_spec_sfc_instance, resource_spec_policy_triggers)
+        # if the resource specification document was also provided in the request, do a validation between the two documents
+        if resource_spec_reference is not None:
+            # parse the resource specification file and extract the required information
+            resource_spec_sfc, resource_spec_sfc_instance, resource_spec_policy_triggers = self._parse_resource_spec(resource_spec_reference)
+
+            # do validation between the two TOSCA documents
+            self._compare_alert_and_resource_spec(sfc, sfc_instance, alert_spec_policy_triggers, resource_spec_sfc, resource_spec_sfc_instance, resource_spec_policy_triggers)
+        # otherwise don't do a validation and use an empty mapping for the extracted trigger identifiers from the resource specification
+        else:
+            resource_spec_policy_triggers = {}
 
-        db = sfc  # database per service function chain, named after the service function chain ID
+        db = sfc  # Influx database per service function chain, named after the service function chain ID
 
         # iterate through every policy and extract all triggers of the given policy - the returned lists of errors will be empty if no errors were encountered
         # while interacting with the Kapacitor HTTP API, the patch_flag is set to False so that existing resources are not recreated
@@ -485,7 +490,7 @@ class AlertsConfigurationAPI(object):
         :param db: Influx database ID
         :param kapacitor_host: default host is localhost (CLMC service running on the same machine as Kapacitor)
         :param kapacitor_port: default value to use is 9092
-        :param resource_spec_policy_triggers: the extracted policy-trigger strings from the resource specification
+        :param resource_spec_policy_triggers: the extracted policy-trigger strings from the resource specification TOSCA document, expecting an empty mapping if resource specification is not submitted to CLMC
         :param patch_duplicates: (defaults to False) if set to True, any duplication errors will be handled by first deleting the existing resource and then creating it
 
         :return: the list for tracking errors while interacting with Kapacitor tasks and the list for tracking errors while interacting with Kapacitor alert handlers
@@ -501,7 +506,10 @@ class AlertsConfigurationAPI(object):
             for trigger in policy.triggers:
                 event_id = trigger.name
                 policy_id = policy.name
-                resource_spec_trigger_id = resource_spec_policy_triggers.get("{0}\n{1}".format(policy_id, event_id))
+
+                # get the respective trigger identifier derived from the resource specification TOSCA document,
+                # if not specified use the trigger name from the alerts configuration TOSCA document
+                resource_spec_trigger_id = resource_spec_policy_triggers.get("{0}\n{1}".format(policy_id, event_id), trigger.name)
 
                 # get metadata defined for the trigger, defaults to empty key-value mapping
                 # the metadata is only useful for the relative alert template - can be used to set the diff evaluation to percentage difference
diff --git a/src/service/clmcservice/generate_network_measurements.py b/src/service/clmcservice/generate_network_measurements.py
deleted file mode 100644
index 41f99702cd7b2f2fead783cc7affb1507e74add6..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/generate_network_measurements.py
+++ /dev/null
@@ -1,122 +0,0 @@
-#!/usr/bin/python3
-"""
-// © University of Southampton IT Innovation Centre, 2018
-//
-// Copyright in this software belongs to University of Southampton
-// IT Innovation Centre of Gamma House, Enterprise Road,
-// Chilworth Science Park, Southampton, SO16 7NS, UK.
-//
-// This software may not be used, sold, licensed, transferred, copied
-// or reproduced in whole or in part in any manner or form or in or
-// on any media by any person other than in accordance with the terms
-// of the Licence Agreement supplied with the software, or otherwise
-// without the prior written consent of the copyright owners.
-//
-// This software is distributed WITHOUT ANY WARRANTY, without even the
-// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-// PURPOSE, except where stated in the Licence Agreement supplied with
-// the software.
-//
-//      Created By :            Nikolay Stanchev
-//      Created Date :          25-06-2018
-//      Created for Project :   FLAME
-"""
-import getopt
-import sys
-from itertools import permutations
-from influxdb import InfluxDBClient
-from json import load
-from py2neo import Graph, Node, Relationship
-
-
-def report_network_measurements(influx_host, db_name, json_data, neo4j_host, neo4j_password):
-    """
-    Generates network measurements which follow the telegraf ping plugin format.
-
-    :param influx_host: influx DB host
-    :param db_name: name of database
-    :param json_data: the network configuration data
-    :param neo4j_host: the neo4j db host
-    :param neo4j_password: the neo4j db password
-    """
-
-    # declares the data to push to influx - host, url, avg_response_ms, min_response_ms, max_response_ms
-    data = tuple((link["source"], link["target"], link["avg_response_time"], link["min_response_time"], link["max_response_time"]) for link in json_data["links"])
-
-    json_body = [
-        {"measurement": "ping",
-         "tags": {"host": host, "url": url},
-         "fields": {"packets_transmitted": 10, "reply_received": 10, "packets_received": 10,
-                    "percent_reply_loss": 0, "percent_packets_loss": 0, "errors": 0, "average_response_ms": avg_ms,
-                    "minimum_response_ms": min_ms, "maximum_response_ms": max_ms, "result_code": 0},
-         "time": 1528385860 * 10**9
-         } for host, url, avg_ms, min_ms, max_ms in data
-    ]
-
-    print("Establishing connection with influx DB on {0} with database {1}".format(influx_host, db_name))
-    db_client = InfluxDBClient(host=influx_host, timeout=10, database=db_name)
-    db_client.drop_measurement("ping")  # clear data in the ping measurement from previous executions of this script
-    print("Writing network latency data to influx..\n")
-    assert db_client.write_points(json_body)  # assert the write method returns True - successful write
-
-    graph = Graph(host=neo4j_host, password=neo4j_password)
-
-    print("Building network links from the ping telegraf plugin in influx")
-    compute_nodes = set([host for host, url, avg_ms, min_ms, max_ms in data])
-    # retrieve all network latencies available from the influx ping table
-    for network_link in permutations(compute_nodes, 2):
-        from_node_name, to_node_name = network_link
-        from_node = graph.nodes.match("ComputeNode", name=from_node_name).first()
-        if from_node is None:
-            from_node = Node("ComputeNode", name=from_node_name)
-            graph.create(from_node)
-
-        to_node = graph.nodes.match("ComputeNode", name=to_node_name).first()
-        if to_node is None:
-            to_node = Node("ComputeNode", name=to_node_name)
-            graph.create(to_node)
-
-        # query = 'SELECT mean(*) FROM "{0}"."autogen"."ping" WHERE host=\'{1}\' and url=\'{2}\' and time>={3} and time<{4}'.format(db_name, from_node['name'], to_node['name'], from_timestamp, to_timestamp)
-        # In future when latencies are reported continuously, we should put timestamp filtering in the query for network links
-        query = 'SELECT mean(*) FROM "{0}"."autogen"."ping" WHERE host=\'{1}\' and url=\'{2}\''.format(db_name, from_node['name'], to_node['name'])
-        print("Executing query: {0}".format(query))
-
-        result = db_client.query(query)  # execute the query
-        # get the dictionary of result points; the next() function just gets the first element of the query results generator (we only expect one item in the generator)
-        try:
-            actual_result = next(result.get_points())
-            latency = actual_result.get("mean_average_response_ms")/2
-            if graph.relationships.match(nodes=(from_node, to_node), r_type="linkedTo").first() is None:
-                edge = Relationship(from_node, "linkedTo", to_node, latency=latency)
-                graph.create(edge)
-        except StopIteration:
-            # in this case there is no such link reported to Influx
-            print("There is no direct link between {0} and {1}".format(from_node, to_node))
-
-
-if __name__ == "__main__":
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], "h:d:p:", ['host=', 'database=', 'path='])
-    except getopt.GetoptError:
-        print('generate_network_measurements.py -h <influx host> -d <influx database> -p <network configuration file path>')
-        sys.exit(1)
-
-    if len(opts) != 3:
-        print('generate_network_measurements.py -h <influx host> -d <influx database> -p <network configuration file path>')
-        sys.exit(1)
-
-    db_host, database, path = None, None, None
-    # Apply options, if any
-    for opt, arg in opts:
-        if opt in ('-h', '--host'):
-            db_host = arg
-        elif opt in ('-d', '--database'):
-            database = arg
-        elif opt in ('-p', '--path'):
-            path = arg
-
-    if all([db_host is not None, database is not None, path is not None]):
-        with open(path) as fh:
-            json_data = load(fh)
-
-        report_network_measurements(db_host, database, json_data, db_host, "admin")
diff --git a/src/service/clmcservice/graphapi/conftest.py b/src/service/clmcservice/graphapi/conftest.py
index ed2d62e7bbd9570383084a611f0411e11d6e1af8..435e11d519dc3a2d660785ade88f66c4a287308d 100644
--- a/src/service/clmcservice/graphapi/conftest.py
+++ b/src/service/clmcservice/graphapi/conftest.py
@@ -202,7 +202,7 @@ def graph_network_topology():
 
     global links, switches, clusters
 
-    graph = Graph(host="localhost", password="admin")
+    graph = Graph(host="localhost", port=7687, password="admin")
     graph.node_cache.clear()
     graph.relationship_cache.clear()
 
diff --git a/src/service/clmcservice/graphapi/tests.py b/src/service/clmcservice/graphapi/tests.py
index 596725c22f9f73c61f11d0db3ce150bd4a971bed..14d6f3e6bb3dba2af61ea2f0b86b36d78c2b48bc 100644
--- a/src/service/clmcservice/graphapi/tests.py
+++ b/src/service/clmcservice/graphapi/tests.py
@@ -46,7 +46,7 @@ class TestGraphAPI(object):
         """
 
         self.registry = testing.setUp()
-        self.registry.add_settings({"neo4j_host": "localhost", "neo4j_password": "admin", "influx_host": "localhost", "influx_port": 8086, "network_bandwidth": 104857600,
+        self.registry.add_settings({"neo4j_host": "localhost", "neo4j_port": 7687, "neo4j_password": "admin", "influx_host": "localhost", "influx_port": 8086, "network_bandwidth": 104857600,
                                     "network_ues_path": "/opt/clmc/src/service/resources/GraphAPI/network_ues.json",
                                     "network_clusters_path": "/opt/clmc/src/service/resources/GraphAPI/network_ues.json",
                                     "sdn_controller_ip": "127.0.0.1", "sdn_controller_port": 8080
diff --git a/src/service/clmcservice/graphapi/views.py b/src/service/clmcservice/graphapi/views.py
index 9fb84e62548df3852fe71854b5526b8b908c5275..23be30fe68f204f50164e5faf0ff10bc2ca7b1f4 100644
--- a/src/service/clmcservice/graphapi/views.py
+++ b/src/service/clmcservice/graphapi/views.py
@@ -505,7 +505,7 @@ class GraphAPI(object):
         :return: reference to the graph DB object
         """
 
-        graph = Graph(host=self.request.registry.settings['neo4j_host'], password=self.request.registry.settings['neo4j_password'])
+        graph = Graph(host=self.request.registry.settings['neo4j_host'], port=self.request.registry.settings["neo4j_port"], password=self.request.registry.settings['neo4j_password'])
 
         # TODO we probably need to remove the caching functionality of py2neo and maintain our own version
         # NOTE: make sure Py2neo caching is disabled!
diff --git a/src/service/clmcservice/initialize_db.py b/src/service/clmcservice/initialize_db.py
deleted file mode 100644
index c6987d65ce72a38ae229b6ad9cdd55d0756efb47..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/initialize_db.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/python3
-"""
-// © University of Southampton IT Innovation Centre, 2018
-//
-// Copyright in this software belongs to University of Southampton
-// IT Innovation Centre of Gamma House, Enterprise Road,
-// Chilworth Science Park, Southampton, SO16 7NS, UK.
-//
-// This software may not be used, sold, licensed, transferred, copied
-// or reproduced in whole or in part in any manner or form or in or
-// on any media by any person other than in accordance with the terms
-// of the Licence Agreement supplied with the software, or otherwise
-// without the prior written consent of the copyright owners.
-//
-// This software is distributed WITHOUT ANY WARRANTY, without even the
-// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-// PURPOSE, except where stated in the Licence Agreement supplied with
-// the software.
-//
-//      Created By :            Nikolay Stanchev
-//      Created Date :          25-06-2018
-//      Created for Project :   FLAME
-"""
-
-import os
-import sys
-from sqlalchemy import engine_from_config
-from pyramid.paster import get_appsettings, setup_logging
-from clmcservice.models.meta import Base
-
-
-def usage(argv):
-    """
-    A method to be called when the script has been used in an incorrect way.
-
-    :param argv: cmd arguments
-    """
-
-    cmd = os.path.basename(argv[0])
-    print('usage: %s <config_uri>\n'
-          '(example: "%s development.ini")' % (cmd, cmd))
-    sys.exit(1)
-
-
-def main(argv=sys.argv):
-    """
-    Main method of the script - initialises the database by creating all tables declared in the models.py module
-
-    :param argv: command line arguments - expects a configuration .ini file from which it retrieves the URL with which to connect to postgresql
-    """
-
-    if len(argv) != 2:
-        usage(argv)  # in case of wrong usage
-
-    config_uri = argv[1]
-    setup_logging(config_uri)
-
-    settings = get_appsettings(config_uri)  # get application specific settings
-    engine = engine_from_config(settings, 'sqlalchemy.')  # create the db engine from the sqlalchemy setting configured in the .ini file
-
-    Base.metadata.create_all(engine)  # creates all model tables
diff --git a/src/service/clmcservice/models/__init__.py b/src/service/clmcservice/models/__init__.py
index 345922d34bbb09512b630750722082e86a645eaf..ac6f5d05cf9fd489c6dfb268863faeab644c766b 100644
--- a/src/service/clmcservice/models/__init__.py
+++ b/src/service/clmcservice/models/__init__.py
@@ -1,5 +1 @@
-from .meta import DBSession
-
 from .graphapi_models import MonitoringProcess
-# NOTE: all ORM models defined in this package must be imported here (in the __init__.py file) - Pyramid and SQLAlchemy specific approach
-from .whoami_models import ServiceFunctionEndpoint
diff --git a/src/service/clmcservice/models/meta.py b/src/service/clmcservice/models/meta.py
deleted file mode 100644
index 698d750c650ec21a442f217b4e0f998a09ca68ea..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/models/meta.py
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/python3
-"""
-// © University of Southampton IT Innovation Centre, 2018
-//
-// Copyright in this software belongs to University of Southampton
-// IT Innovation Centre of Gamma House, Enterprise Road,
-// Chilworth Science Park, Southampton, SO16 7NS, UK.
-//
-// This software may not be used, sold, licensed, transferred, copied
-// or reproduced in whole or in part in any manner or form or in or
-// on any media by any person other than in accordance with the terms
-// of the Licence Agreement supplied with the software, or otherwise
-// without the prior written consent of the copyright owners.
-//
-// This software is distributed WITHOUT ANY WARRANTY, without even the
-// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-// PURPOSE, except where stated in the Licence Agreement supplied with
-// the software.
-//
-//      Created By :            Nikolay Stanchev
-//      Created Date :          02-07-2018
-//      Created for Project :   FLAME
-"""
-
-
-import transaction
-from sqlalchemy.ext.declarative import declarative_base
-from sqlalchemy.orm import scoped_session, sessionmaker
-from zope.sqlalchemy import ZopeTransactionExtension
-
-
-DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))  # initialise a ORM session, ought to be reused across the different modules
-
-
-class ORMClass(object):
-    """
-    Declares a parent class for all models which eases querying
-    """
-
-    @classmethod
-    def query(cls):
-        """
-        Pass down the class name when using the DBSession.query method and use ModelClass.query() instead of DBSession.query(ModelClass)
-
-        :return: the query result object
-        """
-
-        global DBSession
-
-        return DBSession.query(cls)
-
-    @staticmethod
-    def add(instance):
-        """
-        Adds an instance of a model to the database.
-
-        :param instance: the instance to be created in the db.
-        """
-
-        global DBSession
-
-        with transaction.manager:
-            DBSession.add(instance)
-
-    @staticmethod
-    def delete(instance):
-        """
-        Deletes an instance of a model from the database.
-
-        :param instance: the instance to be deleted from the db.
-        """
-
-        global DBSession
-
-        with transaction.manager:
-            DBSession.delete(instance)
-
-    @staticmethod
-    def replace(old_instance, new_instance):
-        """
-        Replaces an instance of a model from the database with a new instance.
-
-        :param old_instance: the instance to be replaced from the db.
-        :param new_instance: the new instance
-        """
-
-        global DBSession
-
-        with transaction.manager:
-            DBSession.add(new_instance)
-            DBSession.delete(old_instance)
-
-    @classmethod
-    def delete_all(cls):
-        """
-        Deletes all instances of a model from the database.
-        """
-
-        global DBSession
-
-        with transaction.manager:
-            deleted_rows = DBSession.query(cls).delete()
-
-        return deleted_rows
-
-
-Base = declarative_base(cls=ORMClass)  # initialise a declarative Base instance to use for the web app models (inherits from the base ORM class defined above)
diff --git a/src/service/clmcservice/models/whoami_models.py b/src/service/clmcservice/models/whoami_models.py
deleted file mode 100644
index 3fdc01ea7030c777e57aca94facc765eca0a2ab9..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/models/whoami_models.py
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/python3
-"""
-// © University of Southampton IT Innovation Centre, 2018
-//
-// Copyright in this software belongs to University of Southampton
-// IT Innovation Centre of Gamma House, Enterprise Road,
-// Chilworth Science Park, Southampton, SO16 7NS, UK.
-//
-// This software may not be used, sold, licensed, transferred, copied
-// or reproduced in whole or in part in any manner or form or in or
-// on any media by any person other than in accordance with the terms
-// of the Licence Agreement supplied with the software, or otherwise
-// without the prior written consent of the copyright owners.
-//
-// This software is distributed WITHOUT ANY WARRANTY, without even the
-// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-// PURPOSE, except where stated in the Licence Agreement supplied with
-// the software.
-//
-//      Created By :            Nikolay Stanchev
-//      Created Date :          02-07-2018
-//      Created for Project :   FLAME
-"""
-
-from sqlalchemy import Column, String, Integer, UniqueConstraint, and_
-from clmcservice.models.meta import Base
-
-
-class ServiceFunctionEndpoint(Base):
-    """
-    This class defines the main model of the WHOAMI API, declaring the global tags for a specific service function on a specific endpoint.
-    """
-
-    __tablename__ = 'sfendpoint'  # table name in the PostgreSQL database
-
-    __table_args__ = (UniqueConstraint('sf', 'sf_endpoint'),)  # defines a unique constraint across 2 columns - sf, sf_endpoint
-
-    server = Column(String, nullable=False)  # cluster label
-    location = Column(String, nullable=False)  # location label
-    sfc = Column(String, nullable=False)  # service function chain label
-    sfc_instance = Column(String, nullable=False)  # service function chain instance identifier
-    sf_package = Column(String, nullable=False)  # service function package label
-    sf = Column(String, nullable=False)   # service function node defined in the TOSCA resource specification
-    sf_endpoint = Column(String, nullable=False, primary_key=True)  # service function endpoint (FQDN(s) + IP address)
-
-    @property
-    def json(self):
-        """
-        Converts an instance of a ServiceFunctionEndpoint to JSON format.
-
-        :return: a python dictionary object
-        """
-
-        fields = {c.name: getattr(self, c.name) for c in self.__table__.columns}
-
-        return fields
-
-    @staticmethod
-    def get(sf_endpoint):
-        """
-        Gets the service function endpoint object or None if not existing.
-
-        :param sf_endpoint: service function endpoint
-
-        :return: the first object from the result set that matches the unique constraint (should be only one) or None
-        """
-
-        return ServiceFunctionEndpoint.query().filter(ServiceFunctionEndpoint.sf_endpoint == sf_endpoint).first()
-
-    @staticmethod
-    def exists(sf_endpoint):
-        """
-        Checks if an instance matching the sf_endpoint ID exists.
-
-        :param sf_endpoint: service function endpoint ID to check
-
-        :return: True if exists, False otherwise
-        """
-
-        return ServiceFunctionEndpoint.get(sf_endpoint) is not None
diff --git a/src/service/clmcservice/whoamiapi/__init__.py b/src/service/clmcservice/whoamiapi/__init__.py
deleted file mode 100644
index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/whoamiapi/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/src/service/clmcservice/whoamiapi/conftest.py b/src/service/clmcservice/whoamiapi/conftest.py
deleted file mode 100644
index 9a531290eeb89beaebe472a07fe266ca7b608c75..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/whoamiapi/conftest.py
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/python3
-"""
-// © University of Southampton IT Innovation Centre, 2018
-//
-// Copyright in this software belongs to University of Southampton
-// IT Innovation Centre of Gamma House, Enterprise Road,
-// Chilworth Science Park, Southampton, SO16 7NS, UK.
-//
-// This software may not be used, sold, licensed, transferred, copied
-// or reproduced in whole or in part in any manner or form or in or
-// on any media by any person other than in accordance with the terms
-// of the Licence Agreement supplied with the software, or otherwise
-// without the prior written consent of the copyright owners.
-//
-// This software is distributed WITHOUT ANY WARRANTY, without even the
-// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-// PURPOSE, except where stated in the Licence Agreement supplied with
-// the software.
-//
-//      Created By :            Nikolay Stanchev
-//      Created Date :          25-06-2018
-//      Created for Project :   FLAME
-"""
-
-import pytest
-from sqlalchemy import create_engine
-from sqlalchemy.exc import ProgrammingError, OperationalError
-from clmcservice.models.meta import DBSession, Base
-
-
-def create_test_database(db_name):
-    """
-    This function creates a test database with the given name. If the database already exists, it is recreated.
-
-    :param db_name: the test database name
-    """
-
-    engine = create_engine("postgresql://clmc:clmc_service@localhost:5432/postgres", echo=False)
-    conn = engine.connect().execution_options(autocommit=False)
-    conn.execute("ROLLBACK")  # connection is already in a transaction, hence roll back (postgres databases cannot be created in a transaction)
-    try:
-        conn.execute("DROP DATABASE %s" % db_name)
-        print("\nOld database '{0}' has been deleted.".format(db_name))
-    except ProgrammingError:
-        # database probably doesn't exist
-        conn.execute("ROLLBACK")
-    except OperationalError as e:
-        print(e)
-        # database exists and is probably being used by other users
-        conn.execute("ROLLBACK")
-        conn.close()
-        engine.dispose()
-        raise pytest.exit("Old test database cannot be deleted.")
-
-    conn.execute("CREATE DATABASE %s" % db_name)
-    conn.close()
-    engine.dispose()
-    print("\nNew test database '{0}' has been created.".format(db_name))
-
-
-def initialise_database(db_name):
-    """
-    This function initialises the test database by binding the shared DB session to a new connection engine and creating tables for all models.
-
-    :param db_name: test database name
-    :return: the configured DB session, which is connected to the test database
-    """
-
-    engine = create_engine('postgresql://clmc:clmc_service@localhost:5432/{0}'.format(db_name))  # create an engine to connect to the test database
-    DBSession.configure(bind=engine)  # configure the database session
-    Base.metadata.bind = engine
-    Base.metadata.create_all()  # create tables for all models
-
-    return DBSession, engine
-
-
-def drop_test_database(db_name):
-    """
-    This function removes the test database with the given name, if it exists
-
-    :param db_name: the test database name
-    """
-
-    engine = create_engine("postgresql://clmc:clmc_service@localhost:5432/postgres", echo=False)
-    conn = engine.connect().execution_options(autocommit=False)
-    conn.execute("ROLLBACK")  # connection is already in a transaction, hence roll back (postgres databases cannot be created in a transaction)
-    try:
-        conn.execute("DROP DATABASE %s" % db_name)
-        print("\nTest database '{0}' has been deleted.".format(db_name))
-    except ProgrammingError:
-        # database probably doesn't exist
-        conn.execute("ROLLBACK")
-    except OperationalError as e:
-        print(e)
-        # database is probably being used by other users
-        conn.execute("ROLLBACK")
-
-    conn.close()
-    engine.dispose()
-
-
-@pytest.fixture(scope='module', autouse=True)
-def testing_db_session():
-
-    test_database = "whoamitestdb"
-    create_test_database(test_database)  # create a database used for executing the unit tests
-    db_session, engine = initialise_database(test_database)  # initialise the database with the models and retrieve a db session
-
-    yield db_session  # return the db session if needed in any of the tests
-
-    db_session.remove()  # remove the db session
-    engine.dispose()  # dispose from the engine
-    drop_test_database(test_database)  # remove the test database
diff --git a/src/service/clmcservice/whoamiapi/tests.py b/src/service/clmcservice/whoamiapi/tests.py
deleted file mode 100644
index 9c6238c45b6fc8efb32c815d2b62961c06ccc953..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/whoamiapi/tests.py
+++ /dev/null
@@ -1,315 +0,0 @@
-#!/usr/bin/python3
-"""
-// © University of Southampton IT Innovation Centre, 2018
-//
-// Copyright in this software belongs to University of Southampton
-// IT Innovation Centre of Gamma House, Enterprise Road,
-// Chilworth Science Park, Southampton, SO16 7NS, UK.
-//
-// This software may not be used, sold, licensed, transferred, copied
-// or reproduced in whole or in part in any manner or form or in or
-// on any media by any person other than in accordance with the terms
-// of the Licence Agreement supplied with the software, or otherwise
-// without the prior written consent of the copyright owners.
-//
-// This software is distributed WITHOUT ANY WARRANTY, without even the
-// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-// PURPOSE, except where stated in the Licence Agreement supplied with
-// the software.
-//
-//      Created By :            Nikolay Stanchev
-//      Created Date :          25-06-2018
-//      Created for Project :   FLAME
-"""
-
-import pytest
-from json import dumps
-from pyramid import testing
-from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound, HTTPConflict
-from clmcservice.models import ServiceFunctionEndpoint
-from clmcservice.whoamiapi.views import WhoamiAPI
-
-
-class TestWhoamiAPI(object):
-    """
-    A pytest-implementation test for the WHOAMI API endpoints
-    """
-
-    @pytest.fixture(autouse=True)
-    def app_config(self):
-        """
-        A fixture to implement setUp/tearDown functionality for all tests by initializing configuration structure for the web service and db connection
-        """
-
-        self.registry = testing.setUp()
-
-        yield
-
-        testing.tearDown()
-        ServiceFunctionEndpoint.delete_all()  # clear the instances of the model in the test database
-
-    def test_get_all(self):
-        """
-        Tests the GET all method of the WHOAMI API - returns a list of all service function endpoint configurations from the database.
-        """
-
-        request = testing.DummyRequest()
-        response = WhoamiAPI(request).get_all()
-        assert response == [], "Initially there mustn't be any service function endpoint configurations in the database."
-
-        sf_e = ServiceFunctionEndpoint(server="DC1", location="DC1", sfc="sfc1", sfc_instance="sfc_i1", sf_package="sf1", sf="sf_i1", sf_endpoint="sf_endpoint1")
-        expected_response_data = [sf_e.json]
-        ServiceFunctionEndpoint.add(sf_e)  # adds the new instance of the model to the database
-
-        request = testing.DummyRequest()
-        response = WhoamiAPI(request).get_all()
-        assert response == expected_response_data, "Incorrect response data with 1 service function endpoint configuration."
-
-        sf_e = ServiceFunctionEndpoint(server="DC2", location="DC2", sfc="sfc2", sfc_instance="sfc_i2", sf_package="sf2", sf="sf_i2", sf_endpoint="sf_endpoint2")
-        expected_response_data.append(sf_e.json)
-        ServiceFunctionEndpoint.add(sf_e)
-        sf_e = ServiceFunctionEndpoint(server="DC3", location="DC3", sfc="sfc3", sfc_instance="sfc_i3", sf_package="sf3", sf="sf_i3", sf_endpoint="sf_endpoint3")
-        expected_response_data.append(sf_e.json)
-        ServiceFunctionEndpoint.add(sf_e)
-
-        request = testing.DummyRequest()
-        response = WhoamiAPI(request).get_all()
-        assert response == expected_response_data, "Incorrect response data with more than 1 service function endpoint configurations."
-
-    def test_get_one(self):
-        """
-        Tests the GET one method of the WHOAMI API - returns an instance of a service function endpoint configuration from the database.
-        """
-
-        request = testing.DummyRequest()
-        response = WhoamiAPI(request).get_all()
-        assert response == [], "Initially there mustn't be any service function endpoint configurations in the database."
-
-        self._validation_of_url_parameters_test("get_one")
-
-        sf_e = ServiceFunctionEndpoint(location="DC1", server="DC1", sfc="sfc1", sfc_instance="sfc_i1", sf_package="sf1", sf="sf_i1", sf_endpoint="sf_endpoint1")
-        expected_response_data = sf_e.json
-        ServiceFunctionEndpoint.add(sf_e)  # adds the new instance of the model to the database
-
-        request = testing.DummyRequest()
-        request.params["sf_endpoint"] = "sf_endpoint1"
-        response = WhoamiAPI(request).get_one()
-        assert response == expected_response_data, "Invalid data returned in the response of GET instance"
-
-        request = testing.DummyRequest()
-        request.params["sf_endpoint"] = "sf_endpoint2"
-        error_raised = False
-        try:
-            WhoamiAPI(request).get_one()
-        except HTTPNotFound:
-            error_raised = True
-        assert error_raised, "Not found error must be raised in case of a non existing service function endpoint"
-
-    def test_post(self):
-        """
-        Tests the POST method of the WHOAMI API - creates an instance of a service function endpoint configuration in the database.
-        """
-
-        request = testing.DummyRequest()
-        response = WhoamiAPI(request).get_all()
-        assert response == [], "Initially there mustn't be any service function endpoint configurations in the database."
-
-        resource = dict(server="DC1", location="DC1", sfc="sfc1", sfc_instance="sfc_i1", sf_package="sf1", sf="sf_i1", sf_endpoint="sf_endpoint1")
-        json_data = dumps(resource)
-        request = testing.DummyRequest()
-        request.body = json_data.encode(request.charset)
-        response = WhoamiAPI(request).post()
-        assert response == resource, "POST request must return the created resource"
-        assert ServiceFunctionEndpoint.exists("sf_endpoint1"), "POST request must have created the resource"
-
-        resource["location"] = "DC2"
-        json_data = dumps(resource)
-        request = testing.DummyRequest()
-        request.body = json_data.encode(request.charset)
-        error_raised = False
-        try:
-            WhoamiAPI(request).post()
-        except HTTPConflict:
-            error_raised = True
-        assert error_raised, "An error must be raised when trying to create a resource which breaks the unique constraint"
-
-    @pytest.mark.parametrize("body, valid", [
-        ('{"location": "DC1", "server": "DC1", "sfc": "sfc1", "sfc_instance": "sfc_i1", "sf_package": "sf1", "sf": "sf_i1", "sf_endpoint": "sf_endpoint1"}', True),
-        ('{"location": "DC2", "server": "DC2", "sfc": "sfc2", "sfc_instance": "sfc_i2", "sf_package": "sf2", "sf": "sf_i2", "sf_endpoint": "sf_endpoint2"}', True),
-        ('{}', False),
-        ('{"location": "DC1", "server": "DC1", "sfc": "sfc1", "sfc_instance": "sfc_i1", "sf_package": "sf1", "sf": "sf_i1"}', False),
-        ('{"place": "DC2", "server": "DC2", "sfc": "sfc2", "sfc_instance": "sfc_i2", "sf_package": "sf2", "sf": "sf_i2", "sf_endpoint": "sf_endpoint2"}', False),
-        ('{invalid json}', False),
-    ])
-    def test_post_body_validation(self, body, valid):
-        """
-        Tests the POST request validation of the body content.
-
-        :param body: The request body to be validated
-        :param valid: True if body is valid, False otherwise
-        """
-
-        request = testing.DummyRequest()
-        request.body = body.encode(request.charset)
-        error_raised = False
-        try:
-            WhoamiAPI(request).post()
-        except HTTPBadRequest:
-            error_raised = True
-        assert error_raised == (not valid), "An error must be raised in case of an invalid request body"
-
-    def test_put(self):
-        """
-        Tests the PUT method of the WHOAMI API - overwrites an instance of a service function endpoint configuration from the database.
-        """
-
-        request = testing.DummyRequest()
-        response = WhoamiAPI(request).get_all()
-        assert response == [], "Initially there mustn't be any service function endpoint configurations in the database."
-
-        self._validation_of_url_parameters_test("put")
-
-        resource = dict(location="location1", server="location1", sfc="sfc1", sfc_instance="sfc_i1", sf_package="sf1", sf="sf_i1", sf_endpoint="sf_endpoint1")
-        body = dumps(resource)
-        request = testing.DummyRequest()
-        request.params["sf_endpoint"] = "sf_endpoint1"
-        request.body = body.encode(request.charset)
-        error_raised = False
-        try:
-            WhoamiAPI(request).put()
-        except HTTPNotFound:
-            error_raised = True
-        assert error_raised, "Not found error must be raised in case of a non existing service function endpoint"
-
-        sf_e = ServiceFunctionEndpoint(location="DC1", server="DC1", sfc="sfc1", sfc_instance="sfc_i1", sf_package="sf1", sf="sf_i1", sf_endpoint="sf_endpoint1")
-        ServiceFunctionEndpoint.add(sf_e)  # adds the new instance of the model to the database
-
-        resource = dict(location="location1", server="location1", sfc="sfc1", sfc_instance="sfc_i1", sf_package="sf1", sf="sf_i1", sf_endpoint="sf_endpoint1")
-        body = dumps(resource)
-        request = testing.DummyRequest()
-        request.params["sf_endpoint"] = "sf_endpoint1"
-        request.body = body.encode(request.charset)
-        response = WhoamiAPI(request).put()
-        assert response == resource, "PUT request must return the updated resource"
-        assert ServiceFunctionEndpoint.get("sf_endpoint1").json["location"] == "location1"
-
-        resource = dict(location="DC1", server="DC1", sfc="sfc1", sfc_instance="sfc_i1", sf_package="sf1", sf="sf_i2", sf_endpoint="sf_endpoint2")
-        body = dumps(resource)
-        request = testing.DummyRequest()
-        request.params["sf_endpoint"] = "sf_endpoint1"
-        request.body = body.encode(request.charset)
-        response = WhoamiAPI(request).put()
-        assert response == resource, "PUT request must return the updated resource"
-        assert not ServiceFunctionEndpoint.exists("sf_endpoint1"), "Resource has not been updated"
-        assert ServiceFunctionEndpoint.exists("sf_endpoint2"), "Resource has not been updated"
-
-        sf_e = ServiceFunctionEndpoint(location="DC1", server="DC1", sfc="sfc1", sfc_instance="sfc_i1", sf_package="sf1", sf="sf_i1", sf_endpoint="sf_endpoint1")
-        ServiceFunctionEndpoint.add(sf_e)  # adds the new instance of the model to the database
-
-        resource = dict(location="DC1", server="DC1", sfc="sfc1", sfc_instance="sfc_i1", sf_package="sf1", sf="sf_i2", sf_endpoint="sf_endpoint2")
-        body = dumps(resource)
-        request = testing.DummyRequest()
-        request.params["sf_endpoint"] = "sf_endpoint1"
-        request.body = body.encode(request.charset)
-        error_raised = False
-        try:
-            WhoamiAPI(request).put()
-        except HTTPConflict:
-            error_raised = True
-        assert error_raised, "PUT request breaks unique constraint"
-
-    @pytest.mark.parametrize("body, valid", [
-        ('{"location": "DC1", "server": "DC1", "sfc": "sfc1", "sfc_instance": "sfc_i1", "sf_package": "sf1", "sf": "sf_i1", "sf_endpoint": "sf_endpoint1"}', True),
-        ('{"location": "DC2", "server": "DC2", "sfc": "sfc2", "sfc_instance": "sfc_i2", "sf_package": "sf2", "sf": "sf_i2", "sf_endpoint": "sf_endpoint2"}', True),
-        ('{}', False),
-        ('{"location": "DC1", "server": "DC1", "sfc": "sfc1", "sfc_instance": "sfc_i1", "sf_package": "sf1", "sf": "sf_i1"}', False),
-        ('{"place": "DC2", "server": "DC2", "sfc": "sfc2", "sfc_instance": "sfc_i2", "sf_package": "sf2", "sf": "sf_i2", "sf_endpoint": "sf_endpoint2"}', False),
-        ('{invalid json}', False),
-    ])
-    def test_put_body_validation(self, body, valid):
-        """
-        Tests the PUT request validation of the body content.
-
-        :param body: The request body to be validated
-        :param valid: True if body is valid, False otherwise
-        """
-
-        sf_e = ServiceFunctionEndpoint(location="DC1", server="DC1", sfc="sfc1", sfc_instance="sfc_i1", sf_package="sf1", sf="sf_i1", sf_endpoint="sf_endpoint1")
-        ServiceFunctionEndpoint.add(sf_e)  # adds the new instance of the model to the database
-
-        request = testing.DummyRequest()
-        request.params["sf_endpoint"] = "sf_endpoint1"
-        request.body = body.encode(request.charset)
-        error_raised = False
-        try:
-            WhoamiAPI(request).put()
-        except HTTPBadRequest:
-            error_raised = True
-
-        assert error_raised == (not valid)
-
-    def test_delete(self):
-        """
-        Tests the DELETE method of the WHOAMI API - deletes an instance of a service function endpoint configuration from the database.
-        """
-
-        request = testing.DummyRequest()
-        response = WhoamiAPI(request).get_all()
-        assert response == [], "Initially there mustn't be any service function endpoint configurations in the database."
-
-        self._validation_of_url_parameters_test("delete")
-
-        sf_e = ServiceFunctionEndpoint(location="DC1", server="DC1", sfc="sfc1", sfc_instance="sfc_i1", sf_package="sf1", sf="sf_i1", sf_endpoint="sf_endpoint1")
-        to_delete = sf_e.json
-        ServiceFunctionEndpoint.add(sf_e)  # adds the new instance of the model to the database
-
-        assert ServiceFunctionEndpoint.exists("sf_endpoint1")
-
-        request = testing.DummyRequest()
-        request.params["sf_endpoint"] = "sf_endpoint1"
-        response = WhoamiAPI(request).delete()
-        assert response == to_delete, "DELETE must return the deleted object if successful"
-
-        assert not ServiceFunctionEndpoint.exists("sf_endpoint1"), "Resource must be deleted after the delete API method has been called."
-
-        request = testing.DummyRequest()
-        request.params["sf_endpoint"] = "sf_endpoint1"
-        error_raised = False
-        try:
-            WhoamiAPI(request).delete()
-        except HTTPNotFound:
-            error_raised = True
-        assert error_raised, "Not found error must be raised in case of a non existing service function endpoint"
-
-    @staticmethod
-    def _validation_of_url_parameters_test(method):
-        """
-        Validates the way a whoami API method handles url query parameters
-
-        :param method: the method to test
-        """
-
-        request = testing.DummyRequest()
-        error_raised = False
-        try:
-            getattr(WhoamiAPI(request), method).__call__()
-        except HTTPBadRequest:
-            error_raised = True
-        assert error_raised, "Error must be raised in case of no URL parameters"
-
-        request = testing.DummyRequest()
-        request.params["sf_i"] = "sf_i"
-        request.params["sr"] = "sr"
-        try:
-            getattr(WhoamiAPI(request), method).__call__()
-        except HTTPBadRequest:
-            error_raised = True
-        assert error_raised, "Error must be raised in case of wrong arguments"
-
-        request = testing.DummyRequest()
-        request.params["sf_endp"] = "sf_endpoint"  # argument should be sf_endpoint
-        try:
-            getattr(WhoamiAPI(request), method).__call__()
-        except HTTPBadRequest:
-            error_raised = True
-        assert error_raised, "Error must be raised in case of invalid naming of arguments"
diff --git a/src/service/clmcservice/whoamiapi/utilities.py b/src/service/clmcservice/whoamiapi/utilities.py
deleted file mode 100644
index 0ee493671cc0ad6f1a989821ce98df5d70bacaa6..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/whoamiapi/utilities.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/python3
-"""
-// © University of Southampton IT Innovation Centre, 2018
-//
-// Copyright in this software belongs to University of Southampton
-// IT Innovation Centre of Gamma House, Enterprise Road,
-// Chilworth Science Park, Southampton, SO16 7NS, UK.
-//
-// This software may not be used, sold, licensed, transferred, copied
-// or reproduced in whole or in part in any manner or form or in or
-// on any media by any person other than in accordance with the terms
-// of the Licence Agreement supplied with the software, or otherwise
-// without the prior written consent of the copyright owners.
-//
-// This software is distributed WITHOUT ANY WARRANTY, without even the
-// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-// PURPOSE, except where stated in the Licence Agreement supplied with
-// the software.
-//
-//      Created By :            Nikolay Stanchev
-//      Created Date :          25-06-2018
-//      Created for Project :   FLAME
-"""
-
-from json import loads
-from clmcservice.models import ServiceFunctionEndpoint
-
-
-def validate_sfendpoint_body(body):
-    """
-    Validates the request body used to create an endpoint configuration resource in the database.
-
-    :param body: the request body to validate
-
-    :return the validated configuration dictionary object
-
-    :raise AssertionError: if the body is not a valid configuration
-    """
-
-    try:
-        body = loads(body)
-    except Exception:
-        raise AssertionError("Configuration must be a JSON object.")
-
-    assert len(body) == len(ServiceFunctionEndpoint.__table__.columns), "Endpoint configuration mustn't contain a different number of attributes than the number of required ones."
-
-    # validate that all required attributes are given in the body
-    for attribute in ServiceFunctionEndpoint.__table__.columns:
-        assert attribute.name in body, "Required attribute not found in the request content."
-
-    return body
diff --git a/src/service/clmcservice/whoamiapi/views.py b/src/service/clmcservice/whoamiapi/views.py
deleted file mode 100644
index b89bfb5febc963521b43d8aa12ac379520e02ec4..0000000000000000000000000000000000000000
--- a/src/service/clmcservice/whoamiapi/views.py
+++ /dev/null
@@ -1,190 +0,0 @@
-#!/usr/bin/python3
-"""
-// © University of Southampton IT Innovation Centre, 2018
-//
-// Copyright in this software belongs to University of Southampton
-// IT Innovation Centre of Gamma House, Enterprise Road,
-// Chilworth Science Park, Southampton, SO16 7NS, UK.
-//
-// This software may not be used, sold, licensed, transferred, copied
-// or reproduced in whole or in part in any manner or form or in or
-// on any media by any person other than in accordance with the terms
-// of the Licence Agreement supplied with the software, or otherwise
-// without the prior written consent of the copyright owners.
-//
-// This software is distributed WITHOUT ANY WARRANTY, without even the
-// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-// PURPOSE, except where stated in the Licence Agreement supplied with
-// the software.
-//
-//      Created By :            Nikolay Stanchev
-//      Created Date :          25-06-2018
-//      Created for Project :   FLAME
-"""
-
-from pyramid.httpexceptions import HTTPBadRequest, HTTPConflict, HTTPNotFound
-from pyramid.view import view_defaults, view_config
-from clmcservice.models import ServiceFunctionEndpoint
-from clmcservice.whoamiapi.utilities import validate_sfendpoint_body
-
-
-@view_defaults(renderer='json')
-class WhoamiAPI(object):
-    """
-    A class-based view for accessing and mutating the configuration of SF endpoints - namely, the WHOAMI API.
-    """
-
-    def __init__(self, request):
-        """
-        Initialises the instance of the view with the request argument.
-
-        :param request: client's call request
-        """
-
-        self.request = request
-
-    @view_config(route_name='whoami_endpoints', request_method='GET')
-    def get_all(self):
-        """
-        GET API call for all resources.
-
-        :return: A list of all service function endpoint configurations found in the database.
-        """
-
-        return [instance.json for instance in ServiceFunctionEndpoint.query()]
-
-    @view_config(route_name='whoami_endpoints_instance', request_method='GET')
-    def get_one(self):
-        """
-        GET API call for a single resources.
-
-        :return: One service function endpoint configuration instance retrieved from the database by querying the uniquely constrained columns.
-
-        :raises HTTPBadRequest: if the request parameters are invalid(invalid url query string)
-        :raises HTTPNotFound: if a resource with the given parameters doesn't exist in the database
-        """
-
-        sf_endpoint = self._get_sf_endpoint_from_url_string()
-        if sf_endpoint is None:
-            raise HTTPNotFound("A service function endpoint with the given URL query parameters doesn't exist.")
-        else:
-            return sf_endpoint.json
-
-    @view_config(route_name='whoami_endpoints', request_method='POST')
-    def post(self):
-        """
-        A POST API call to create a new service function endpoint.
-
-        :return: A JSON response to the POST call - essentially with the data of the new resource
-
-        :raises HTTPBadRequest: if request body is not a valid JSON for the configuration
-        :raises HTTPConflict: if the unique constraints are not preserved after the creation of a new instance
-        """
-
-        # create an instance of the model and add it to the database table
-        sf_endpoint = self._validate_and_create()
-        json_data = sf_endpoint.json
-        ServiceFunctionEndpoint.add(sf_endpoint)
-
-        self.request.response.status = 201
-
-        return json_data
-
-    @view_config(route_name='whoami_endpoints_instance', request_method='PUT')
-    def put(self):
-        """
-        A PUT API call to update a service function endpoint.
-
-        :return: A JSON response representing the updated object
-
-        :raises HTTPBadRequest: if the request parameters are invalid(invalid url query string)
-        :raises HTTPNotFound: if a resource with the given parameters doesn't exist in the database
-        """
-
-        sf_endpoint = self._get_sf_endpoint_from_url_string()
-        if sf_endpoint is None:
-            raise HTTPNotFound("A service function endpoint with the given identifier doesn't exist.")
-        else:
-            try:
-                body = self.request.body.decode(self.request.charset)
-                validated_body = validate_sfendpoint_body(body)  # validate the content and receive a json dictionary object
-            except AssertionError as e:
-                raise HTTPBadRequest("Bad request content. Configuration format is incorrect: {0}".format(e.args))
-
-            new_resource = validated_body
-            old_resource = sf_endpoint.json
-            updating = new_resource["sf_endpoint"] == old_resource["sf_endpoint"]
-
-            if updating:
-                ServiceFunctionEndpoint.delete(sf_endpoint)
-                new_sf_endpoint = ServiceFunctionEndpoint(**validated_body)
-                ServiceFunctionEndpoint.add(new_sf_endpoint)
-            else:
-                resource_exists = ServiceFunctionEndpoint.exists(new_resource["sf_endpoint"])
-                if resource_exists:
-                    raise HTTPConflict("Service function endpoint with this configuration already exists.")  # error 409 in case of resource conflict
-
-                new_sf_endpoint = ServiceFunctionEndpoint(**validated_body)
-                ServiceFunctionEndpoint.replace(sf_endpoint, new_sf_endpoint)
-
-            return validated_body
-
-    @view_config(route_name='whoami_endpoints_instance', request_method='DELETE')
-    def delete(self):
-        """
-        Deletes an instance of a service function endpoint configuration in the database.
-
-        :return: A content of the object that has been deleted
-
-        :raises HTTPBadRequest: if the request parameters are invalid(invalid url query string)
-        :raises HTTPNotFound: if a resource with the given parameters doesn't exist in the database
-        """
-
-        sf_endpoint = self._get_sf_endpoint_from_url_string()
-        if sf_endpoint is None:
-            raise HTTPNotFound("A service function endpoint with the given parameters doesn't exist.")
-        else:
-            deleted = sf_endpoint.json
-            ServiceFunctionEndpoint.delete(sf_endpoint)
-            return deleted
-
-    def _get_sf_endpoint_from_url_string(self):
-        """
-        Retrieves a service function endpoint configuration from the database by validating and then using the request url parameters.
-
-        :return: An instance of a service function endpoint configuration or None if not existing
-        """
-
-        sf_endpoint_id = self.request.params.get("sf_endpoint")
-        if sf_endpoint_id is None:
-            raise HTTPBadRequest("Request format is incorrect: sf_endpoint ID is not found in the URL query string.")
-
-        sf_endpoint = ServiceFunctionEndpoint.get(sf_endpoint_id)
-
-        return sf_endpoint
-
-    def _validate_and_create(self):
-        """
-        Validates the request body and checks if a resource with the given attributes already exists.
-
-        :return: a new instance of the model, if the resource doesn't exist
-        :raises HTTPBadRequest: if request body is not a valid JSON for the configuration
-        :raises HTTPConflict: if the unique constraints are not preserved after the creation of a new instance
-        """
-
-        try:
-            body = self.request.body.decode(self.request.charset)
-            validated_body = validate_sfendpoint_body(body)  # validate the content and receive a json dictionary object
-        except AssertionError as e:
-            raise HTTPBadRequest("Bad request content. Configuration format is incorrect: {0}".format(e.args))
-
-        resource = validated_body
-
-        resource_exists = ServiceFunctionEndpoint.exists(resource["sf_endpoint"])
-        if resource_exists:
-            raise HTTPConflict("Service function endpoint with the given identifier already exists.")  # error 409 in case of resource conflict
-
-        # create an instance of the model
-        sf_endpoint = ServiceFunctionEndpoint(**resource)
-
-        return sf_endpoint
diff --git a/src/service/development.ini b/src/service/development.ini
index 1b02ebb9d1b9e200a6b05756c36be16a06103ab7..c18e5acaff93e89c53471bee1f1136c0f7af35f7 100644
--- a/src/service/development.ini
+++ b/src/service/development.ini
@@ -17,9 +17,6 @@ network_ues_path = /opt/clmc/src/service/resources/GraphAPI/network_ues.json
 # 10000 Mb/s = 10 Gb/s (static configuration of maximum theoretical bandwidth)
 network_bandwidth = 10000
 
-# PostgreSQL connection url
-sqlalchemy.url = postgresql://clmc:clmc_service@localhost:5432/whoamidb
-
 # InfluxDB connection
 influx_host = localhost
 influx_port = 8086
@@ -30,6 +27,7 @@ kapacitor_port = 9092
 
 # Neo4j connection
 neo4j_host = localhost
+neo4j_port = 7687
 neo4j_password = admin
 
 # CLMC service connection - port number is specified here so that the application can access this configuration, but also in the [server:main] config
@@ -42,7 +40,7 @@ listen = localhost:9080
 
 
 [loggers]
-keys = root, exc_logger, service_logger, sqlalchemy.engine.base.Engine
+keys = root, exc_logger, service_logger
 
 [handlers]
 keys = console, filelog, exc_handler
@@ -54,11 +52,6 @@ keys = generic, exc_formatter
 level = INFO
 handlers = console
 
-[logger_sqlalchemy.engine.base.Engine]
-level = INFO
-handlers =
-qualname = sqlalchemy.engine.base.Engine
-
 [logger_service_logger]
 level = INFO
 handlers = filelog
diff --git a/src/service/production.ini b/src/service/production.ini
index 9cd9e98b553f85f9bbbaafc0af0c092ee9db8fe0..518b22eb3113451490666c52f5cefa3c89bfb3a6 100644
--- a/src/service/production.ini
+++ b/src/service/production.ini
@@ -17,9 +17,6 @@ network_ues_path = /opt/clmc/src/service/resources/GraphAPI/network_ues.json
 # 10000 Mb/s = 10 Gb/s (static configuration of maximum theoretical bandwidth)
 network_bandwidth = 10000
 
-# PostgreSQL connection url
-sqlalchemy.url = postgresql://clmc:clmc_service@localhost:5432/whoamidb
-
 # InfluxDB connection
 influx_host = localhost
 influx_port = 8086
@@ -30,6 +27,7 @@ kapacitor_port = 9092
 
 # Neo4j connection
 neo4j_host = localhost
+neo4j_port = 7687
 neo4j_password = admin
 
 # CLMC service connection - port number is specified here so that the application can access this configuration, but also in the [server:main] config
@@ -42,7 +40,7 @@ listen = *:9080
 
 
 [loggers]
-keys = root, exc_logger, service_logger, sqlalchemy.engine.base.Engine
+keys = root, exc_logger, service_logger
 
 [handlers]
 keys = console, filelog, exc_handler
@@ -54,11 +52,6 @@ keys = generic, exc_formatter
 level = INFO
 handlers = console
 
-[logger_sqlalchemy.engine.base.Engine]
-level = INFO
-handlers =
-qualname = sqlalchemy.engine.base.Engine
-
 [logger_service_logger]
 level = INFO
 handlers = filelog
diff --git a/src/service/requirements.txt b/src/service/requirements.txt
index 9ad70ffe18fb8716354429dda872224fb9b34540..0cd402aafbd895415bba7a9df8480b3b64332112 100644
--- a/src/service/requirements.txt
+++ b/src/service/requirements.txt
@@ -1,9 +1,7 @@
 MarkupSafe==1.1.1
 PasteDeploy==2.0.1
 PrettyTable==0.7.2
-PyYAML==3.13
 Pygments==2.3.1
-SQLAlchemy==1.2.12
 WebOb==1.8.5
 asn1crypto==0.24.0
 atomicwrites==1.3.0
@@ -12,7 +10,6 @@ certifi==2019.3.9
 chardet==3.0.4
 click==7.0
 colorama==0.4.1
-filelock==3.0.10
 hupper==1.6.1
 idna==2.8
 ipaddress==1.0.22
@@ -21,17 +18,16 @@ neobolt==1.7.12
 pbr==5.2.0
 pluggy==0.11.0
 psutil==5.6.1
-psycopg2==2.7.5
 py==1.8.0
 pycparser==2.19
 pyparsing==2.4.0
 pyperclip==1.7.0
 pytz==2019.1
+pyyaml==3.13
 repoze.lru==0.7
 schema==0.6.8
 setuptools==41.0.1
 six==1.12.0
-toml==0.10.0
 translationstring==1.3
 urllib3==1.24.3
 venusian==1.2.0
@@ -59,12 +55,10 @@ influxdb==5.2.0
 plaster-pastedeploy==0.6
 py2neo==4.2.0
 pytest==3.8.1
-transaction==2.4.0
 
 pyOpenSSL==19.0.0
 pyramid==1.9.2
 tosca-parser==1.1.0
-zope.sqlalchemy==1.0
 
 pyramid-exclog==1.0
 pyramid-mako==1.0.2
diff --git a/src/service/setup.py b/src/service/setup.py
index 097fb2a91850353e98845f7643a0ad16ab7efdb2..f218927a5c76aa40fdd3433e4097811efe909ec9 100644
--- a/src/service/setup.py
+++ b/src/service/setup.py
@@ -58,9 +58,6 @@ requires = [
     'pyramid_debugtoolbar==4.5',
     'pyramid_exclog==1.0',
     'waitress==1.1.0',
-    'sqlalchemy==1.2.12',
-    'zope.sqlalchemy==1.0',
-    'psycopg2==2.7.5',
     'influxdb==5.2.0',
     'pyyaml==3.13',
     'tosca-parser==1.1.0',
@@ -98,9 +95,6 @@ setup(
     entry_points={
         'paste.app_factory': [
             'main = clmcservice:main',
-        ],
-        'console_scripts': [
-            'initialize_clmcservice_db = clmcservice.initialize_db:main',
         ]
     },
 )
diff --git a/src/test/VERSION b/src/test/VERSION
index ae058b3516610f17300d78d120ed958d5dd1e988..faf16644b2ebd4f9c9013ded49ba054d5fe10a28 100644
--- a/src/test/VERSION
+++ b/src/test/VERSION
@@ -1 +1 @@
-__version__ = "2.3.1"
\ No newline at end of file
+__version__ = "2.4.0"
\ No newline at end of file