diff --git a/.gitignore b/.gitignore
index 07e683d3972836af2d108f67b090eaa53a5eaf08..3606f48ab501a0bb12774f0c27851fc23e18d82e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,6 @@
-/src/clmc-spec/clmc-spec/nbproject/
-/src/clmc-spec/nbproject/
-/src/clmc-spec/target/
 .vagrant/
 .log
+*__pycache__*
+*__init__.pyc
+*egg-info*
 ubuntu-xenial-16.04-cloudimg-console.log
\ No newline at end of file
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index c30fd29feacbfffca292ee523e9514d470606a83..a6ad1e7dd32daa8c57daf1513daed66fc8365383 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,7 +1,8 @@
 stages:  
   - test:scripts
-  - test:streaming_sim
-  - test:telegraf_agents
+  - test:monitoring
+  - test:inputs
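+  # the monitoring and inputs stages map to the clmctest/monitoring and clmctest/inputs test packages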
   
 test:scripts:
   stage: test:scripts
@@ -9,31 +10,31 @@ test:scripts:
     - vagrant --fixture=scripts -- destroy --force 
   script: 
     - vagrant --fixture=scripts -- up
-    - vagrant --fixture=scripts -- ssh test-runner -- -tt "cd /vagrant && pytest test/scripts/"
+    - vagrant --fixture=scripts -- ssh test-runner -- -tt "cd /vagrant && pytest clmctest/scripts/"
   after_script:
     - vagrant --fixture=scripts -- destroy --force       
   when: manual
   
-test:streaming_sim:
+test:monitoring:
-  stage: test:streaming_sim
+  stage: test:monitoring
   before_script:
-    - vagrant --fixture=streaming-sim -- destroy --force    
+    - vagrant --fixture=monitoring -- destroy --force    
   script: 
-    - vagrant --fixture=streaming-sim -- up
-    - vagrant --fixture=streaming-sim -- ssh test-runner -- -tt "cd /vagrant && pytest test/streaming-sim/"
+    - vagrant --fixture=monitoring -- up
+    - vagrant --fixture=monitoring -- ssh test-runner -- -tt "cd /vagrant && pytest clmctest/monitoring/"
   after_script:
-    - vagrant --fixture=streaming-sim -- destroy --force   
+    - vagrant --fixture=monitoring -- destroy --force   
   when: manual  
   
-test:telegraf_agents:
-  stage: test:telegraf_agents
+test:inputs:
+  stage: test:inputs
   before_script:
-    - vagrant --fixture=telegraf-agents -- destroy --force      
+    - vagrant --fixture=inputs -- destroy --force      
   script: 
-    - vagrant --fixture=telegraf-agents -- up
-    - vagrant --fixture=telegraf-agents -- ssh test-runner -- -tt "cd /vagrant && pytest test/telegraf-agents/"
+    - vagrant --fixture=inputs -- up
+    - vagrant --fixture=inputs -- ssh test-runner -- -tt "cd /vagrant && pytest clmctest/inputs/"
   after_script:
-    - vagrant --fixture=telegraf-agents -- destroy --force       
+    - vagrant --fixture=inputs -- destroy --force       
   when: manual    
 
   
diff --git a/Vagrantfile b/Vagrantfile
index c5a42fe47ab9a6545276968f824d881ff295113d..47524d810802f3645e4d3c754121685030dcf66c 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -48,8 +48,8 @@ end
 
 # load custom config file
 puts "loading custom infrastructure configuration: #{fixture}"
-puts "custom config file: /test/#{fixture}/rspec.yml"
-host_rspec_file = "test/#{fixture}/rspec.yml"
+puts "custom config file: /clmctest/#{fixture}/rspec.yml"
+host_rspec_file = "clmctest/#{fixture}/rspec.yml"
 hosts = YAML.load_file(host_rspec_file)
 
 # Start creating VMS using xenial64 as the base box
@@ -89,21 +89,30 @@ Vagrant.configure("2") do |config|
       puts "Instance name #{instance_name}:"
       case instance_name
         when 'test-runner'
-          instance_config.vm.provision :shell, :path => "test/services/pytest/install.sh"
+          instance_config.vm.provision :shell, :path => "clmctest/services/pytest/install.sh"
         when 'clmc-service'
           instance_config.vm.provision :shell, :path => "scripts/clmc-service/install.sh"       
         else
           # specific service install
-          instance_config.vm.provision :shell, :path => "test/services/#{host["service_name"]}/install.sh", env: {"REPO_ROOT" => "/vagrant"}
+          instance_config.vm.provision :shell, :path => "clmctest/services/#{host["service_name"]}/install.sh", env: {"REPO_ROOT" => "/vagrant"}
     
           # CLMC agent install
           instance_config.vm.provision :shell, :path => "scripts/clmc-agent/install.sh"
 
           # CLMC agent service specific input configuration
-          instance_config.vm.provision :shell, inline: "cp /vagrant/test/services/#{host["service_name"]}/telegraf_#{host["service_name"]}.conf /etc/telegraf/telegraf.d/"     
+          instance_config.vm.provision :shell, inline: <<-SHELL
 
+            cp /vagrant/scripts/clmc-agent/telegraf.conf /etc/telegraf/  
+
+            cp /vagrant/scripts/clmc-agent/telegraf_output.conf /etc/telegraf/telegraf.d/                        
+            
+            cp /vagrant/clmctest/services/#{host["service_name"]}/telegraf_#{host["service_name"]}.conf /etc/telegraf/telegraf.d/ 
+
+          SHELL
+          
           # CLMC agent general and output configuration
-          instance_config.vm.provision :shell, :path => "scripts/clmc-agent/configure_template.sh"
+          # the inline shell provisioner above now installs these files directly, so configure_template.sh is no longer run
 
           instance_config.vm.provision :shell, :path => "scripts/clmc-agent/configure.sh", :args => "#{host["location"]} #{host["sfc_id"]} #{host["sfc_id_instance"]} #{host["sf_id"]} #{host["sf_id_instance"]} #{host["ipendpoint_id"]} #{host["influxdb_url"]} #{host["database_name"]}"  
 
diff --git a/clmctest/inputs/rspec.yml b/clmctest/inputs/rspec.yml
index 6dad4be08d89d3f1bcc31e65e92e909c591d14fd..b64c1462cf39d7a156ee28f218d08ffb596d5430 100644
--- a/clmctest/inputs/rspec.yml
+++ b/clmctest/inputs/rspec.yml
@@ -10,7 +10,7 @@ hosts:
         host: 8888
       - guest: 9092
         host: 9092
-    ip_address: "192.168.50.10"
+    ip_address: "203.0.113.100"
   - name: apache
     cpus: 1
     memory: 2048
@@ -19,14 +19,14 @@ hosts:
     forward_ports:
       - guest: 80
         host: 8881
-    ip_address: "192.168.50.11"
+    ip_address: "203.0.113.101"
     location: "DC1"
     sfc_id: "MS_Template_1"
     sfc_id_instance: "MS_I1"
     sf_id: "adaptive_streaming"
     sf_id_instance: "adaptive_streaming_I1"
     ipendpoint_id: "adaptive_streaming_I1_apache1"
-    influxdb_url: "http://192.168.50.10:8086"
+    influxdb_url: "http://203.0.113.100:8086"
     database_name: "CLMCMetrics"
   - name: nginx
     cpus: 1
@@ -36,14 +36,14 @@ hosts:
     forward_ports:
       - guest: 80
         host: 8082
-    ip_address: "192.168.50.13"
+    ip_address: "203.0.113.102"
     location: "DC1"
     sfc_id: "MS_Template_1"
     sfc_id_instance: "MS_I1"
     sf_id: "adaptive_streaming"
     sf_id_instance: "adaptive_streaming_nginx_I1"
     ipendpoint_id: "adaptive_streaming_nginx_I1_apache1"
-    influxdb_url: "http://192.168.50.10:8086"
+    influxdb_url: "http://203.0.113.100:8086"
     database_name: "CLMCMetrics"
   - name: mongo
     cpus: 1
@@ -53,14 +53,14 @@ hosts:
     forward_ports:
       - guest: 80
         host: 8083
-    ip_address: "192.168.50.14"
+    ip_address: "203.0.113.103"
     location: "DC1"
     sfc_id: "MS_Template_1"
     sfc_id_instance: "MS_I1"
     sf_id: "metadata_database"
     sf_id_instance: "metadata_database_I1"
     ipendpoint_id: "metadata_database_I1_apache1"
-    influxdb_url: "http://192.168.50.10:8086"
+    influxdb_url: "http://203.0.113.100:8086"
     database_name: "CLMCMetrics" 
   - name: ffmpeg
     cpus: 1
@@ -70,14 +70,14 @@ hosts:
     forward_ports:
       - guest: 80
         host: 8084
-    ip_address: "192.168.50.15"
+    ip_address: "203.0.113.104"
     location: "DC1"
     sfc_id: "MS_Template_1"
     sfc_id_instance: "MS_I1"
     sf_id: "metadata_database"
     sf_id_instance: "metadata_database_I1"
     ipendpoint_id: "metadata_database_I1_apache1"
-    influxdb_url: "http://192.168.50.10:8086"
+    influxdb_url: "http://203.0.113.100:8086"
     database_name: "CLMCMetrics" 
   - name: host
     cpus: 1
@@ -87,17 +87,17 @@ hosts:
     forward_ports:
       - guest: 80
         host: 8085
-    ip_address: "192.168.50.16"
+    ip_address: "203.0.113.105"
     location: "DC1"
     sfc_id: "MS_Template_1"
     sfc_id_instance: "MS_I1"
     sf_id: "adaptive_streaming"
     sf_id_instance: "adaptive_streaming_I1"
     ipendpoint_id: "adaptive_streaming_I1_apache1"
-    influxdb_url: "http://192.168.50.10:8086"
+    influxdb_url: "http://203.0.113.100:8086"
     database_name: "CLMCMetrics"
   - name: test-runner
     cpus: 1
     memory: 2048
     disk: "10GB"
-    ip_address: "192.168.50.17"    
\ No newline at end of file
+    ip_address: "203.0.113.150"    
\ No newline at end of file
diff --git a/clmctest/monitoring/rspec.yml b/clmctest/monitoring/rspec.yml
index d59e14971b9435ca1fec03145b5b089983125731..03bb6d746208d1bfec28dbeda791242d5c54cbe0 100644
--- a/clmctest/monitoring/rspec.yml
+++ b/clmctest/monitoring/rspec.yml
@@ -19,7 +19,7 @@ hosts:
     forward_ports:
       - guest: 80
         host: 8081
-    ip_address: "203.0.113.101"
+    ip_address: "203.0.113.140"
     location: "DC1"
     sfc_id: "MS_Template_1"
     sfc_id_instance: "MS_I1"
@@ -36,7 +36,7 @@ hosts:
     forward_ports:
       - guest: 80
         host: 8082
-    ip_address: "203.0.113.102"
+    ip_address: "203.0.113.141"
     location: "DC2"
     sfc_id: "MS_Template_1"
     sfc_id_instance: "MS_I1"
@@ -49,4 +49,4 @@ hosts:
     cpus: 1
     memory: 2048
     disk: "10GB"
-    ip_address: "203.0.113.103"       
\ No newline at end of file
+    ip_address: "203.0.113.150"       
\ No newline at end of file
diff --git a/clmctest/scripts/rspec.yml b/clmctest/scripts/rspec.yml
index f69ab5dfa503ced367ea50eb53bd1ca871669772..17f1d315febe35757fdddd21a9286861b2263cfa 100644
--- a/clmctest/scripts/rspec.yml
+++ b/clmctest/scripts/rspec.yml
@@ -3,5 +3,5 @@ hosts:
     cpus: 1
     memory: 2048
     disk: "10GB"
-    ip_address: "192.168.50.10"
+    ip_address: "200.0.113.150"
     
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 5165fa537d6b134be6eea200f57fdac425a68513..843d5fa627100de2e59023eba4214526504c6095 100644
--- a/setup.py
+++ b/setup.py
@@ -1,8 +1,29 @@
+import os
+import subprocess
+import time
 from setuptools import setup, find_packages
 
+def read(fname):
+    return open(os.path.join(os.path.dirname(__file__), fname)).read()
+
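+# derive the package version from the number of commits on the integration branch,
+# so every commit merged to integration produces a higher version number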
+def get_git_commit():
+    (out, err, return_code) = run_command('git rev-list --count integration')
+    if return_code != 0:
+        raise RuntimeError("failed to count commits on the integration branch")
+    return out.decode("utf-8").strip()
+
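+# wrapper for executing commands on the cli, returning (std_out, std_error, process_return_code)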
+def run_command(cmd):
+    proc = subprocess.Popen([cmd], stdout=subprocess.PIPE, shell=True)
+    out, err = proc.communicate()
+    return_code = proc.returncode
+    return out, err, return_code
+
 setup(
     name = "clmctest",
-    version = "0.0.1",
+    version = get_git_commit(),
     author = "Michael Boniface",
     author_email = "mjb@it-innovation.soton.ac.uk",
     description = "FLAME CLMC Testing Module",
diff --git a/test/__init__.py b/test/__init__.py
deleted file mode 100644
index 44f772595799f5fe338534918c95e23e08e80464..0000000000000000000000000000000000000000
--- a/test/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-#!/usr/bin/python3
\ No newline at end of file
diff --git a/test/scripts/rspec.yml b/test/scripts/rspec.yml
deleted file mode 100644
index f69ab5dfa503ced367ea50eb53bd1ca871669772..0000000000000000000000000000000000000000
--- a/test/scripts/rspec.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-hosts:
-  - name: test-runner
-    cpus: 1
-    memory: 2048
-    disk: "10GB"
-    ip_address: "192.168.50.10"
-    
\ No newline at end of file
diff --git a/test/scripts/test_config_telegraf.py b/test/scripts/test_config_telegraf.py
deleted file mode 100644
index c1d9393044663560aabbe26f5c63c6a42b7488f9..0000000000000000000000000000000000000000
--- a/test/scripts/test_config_telegraf.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/usr/bin/python3
-
-import pytest
-import subprocess
-
-def test_write_telegraf_conf():
-
-  # test telegraf monitoring configuration
-  TELEGRAF_CONF_DIR="/etc/telegraf"
-  LOCATION="DC1"
-  SFC_ID="media_service_A"
-  SFC_ID_INSTANCE="media_service_A_instance"
-  SF_ID="streaming_service"
-  SF_ID_INSTANCE="streaming_service_instance"
-  IP_ENDPOINT_ID="endpoint"
-  INFLUXDB_URL="http://172.29.236.10"
-  DATABASE_NAME="experimentation_database"  
-
-  try:
-    # run write config template script with no telegraf conf directory
-    cmd = 'sudo /vagrant/scripts/clmc-agent/configure_template.sh'
-    (out, err, code) = run_command(cmd)
-    assert code == 1, "Failed to catch error of no telegraf configuration directory : " + str(code) + ", cmd=" + cmd
-
-    # mk telegraf conf directory
-    run_command("sudo mkdir -p /etc/telegraf")
-
-    # run write config template  script with no telegraf.d directory
-    (out, err, code) = run_command(cmd)
-    assert code == 1, "Failed to catch error of no telegraf include directory : " + str(code) + ", cmd=" + cmd
-
-    # mk telegraf.d directory
-    run_command("sudo mkdir -p /etc/telegraf/telegraf.d")    
-
-    # run write config template script and check that the script has exited correctly
-    (out, err, code) = run_command(cmd)
-    assert code == 0, "Failed to write configuration files : " + str(code) + ", cmd=" + cmd
-
-    # run template relacement script with incorrect arguments
-    cmd = 'sudo /vagrant/scripts/clmc-agent/configure.sh' 
-    (out, err, code) = run_command(cmd)
-    assert code == 1, "Failed to return error on incorrect arguments : " + str(code) + ", cmd=" + cmd  
-
-    # run template relacement script with all arguments
-    cmd = 'sudo /vagrant/scripts/clmc-agent/configure.sh ' + LOCATION + ' ' + SFC_ID + ' ' + SFC_ID_INSTANCE + ' ' + SF_ID + ' ' + SF_ID_INSTANCE + ' ' + IP_ENDPOINT_ID + ' ' + INFLUXDB_URL + ' ' + DATABASE_NAME
-    (out, err, code) = run_command(cmd)
-    assert code == 0, "Configure command returned error, output=" + str(out) + ", cmd=" + cmd
-
-    # check that replacement was correct in telegraf.conf
-    try:        
-        TELEGRAF_GENERAL_CONF_FILE = TELEGRAF_CONF_DIR + "/telegraf.conf"
-        with open(TELEGRAF_GENERAL_CONF_FILE) as general_conf:
-          lines = general_conf.read()          
-          assert lines.find(LOCATION), "Cannot find location" 
-          assert lines.find(SFC_ID), "Cannot find sfc_id"
-          assert lines.find(SFC_ID_INSTANCE), "Cannot find sfc_id_instance"  
-          assert lines.find(SF_ID), "Cannot find sfc_id"            
-          assert lines.find(SF_ID_INSTANCE), "Cannot find sf_id_instance"
-          assert lines.find(IP_ENDPOINT_ID), "Cannot find location"                      
-    except FileNotFoundError:
-        assert False, "Telegraf general conf file not found, " + TELEGRAF_GENERAL_CONF_FILE
-
-    # check that replacement was correct in telegraf_output.conf
-    try:
-        TELEGRAF_OUTPUT_CONF_FILE = TELEGRAF_CONF_DIR + "/telegraf.d/telegraf_output.conf"
-        with open(TELEGRAF_OUTPUT_CONF_FILE) as output_conf:
-          lines = output_conf.read()
-          assert lines.find(INFLUXDB_URL), "Cannot find influx_db" 
-          assert lines.find(DATABASE_NAME), "Cannot find database"                    
-    except FileNotFoundError:
-        assert False, "Telegraf output conf file not found, " + TELEGRAF_OUTPUT_CONF_FILE
-
-  finally:
-      # clean up telegraf after test
-      run_command("sudo rm -rf /etc/telegraf")
-      print ("finally")
-
-# wrapper for executing commands on the cli, returning (std_out, std_error, process_return_code)
-def run_command(cmd):
-    """Run a shell command.
-
-    Arguments:
-        cmd {string} -- command to run in the shell
-
-    Returns:
-        stdout, stderr, exit code -- tuple of the process's stdout, stderr and exit code (0 on success)
-    """
-    proc = subprocess.Popen([cmd], stdout=subprocess.PIPE, shell=True)
-    out, err = proc.communicate()
-    return_code = proc.returncode
-    return out, err, return_code
-
diff --git a/test/services/apache/install.sh b/test/services/apache/install.sh
deleted file mode 100755
index fc93bf5d7ffa2944f9df040a7df83f553db0ba5f..0000000000000000000000000000000000000000
--- a/test/services/apache/install.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-#/////////////////////////////////////////////////////////////////////////
-#//
-#// (c) University of Southampton IT Innovation Centre, 2017
-#//
-#// Copyright in this software belongs to University of Southampton
-#// IT Innovation Centre of Gamma House, Enterprise Road,
-#// Chilworth Science Park, Southampton, SO16 7NS, UK.
-#//
-#// This software may not be used, sold, licensed, transferred, copied
-#// or reproduced in whole or in part in any manner or form or in or
-#// on any media by any person other than in accordance with the terms
-#// of the Licence Agreement supplied with the software, or otherwise
-#// without the prior written consent of the copyright owners.
-#//
-#// This software is distributed WITHOUT ANY WARRANTY, without even the
-#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-#// PURPOSE, except where stated in the Licence Agreement supplied with
-#// the software.
-#//
-#//      Created By :            Michael Boniface
-#//      Created Date :          23/01/2018
-#//      Created for Project :   FLAME
-#//
-#/////////////////////////////////////////////////////////////////////////
-
-# Install apache
-sudo apt-get update
-sudo apt-get -y install apache2
-
-# start apache
-apachectl -k start
-apachectl -k restart
diff --git a/test/services/apache/telegraf_apache.conf b/test/services/apache/telegraf_apache.conf
deleted file mode 100644
index dd614108a27146a226b508c8095c6ab1be6eb86d..0000000000000000000000000000000000000000
--- a/test/services/apache/telegraf_apache.conf
+++ /dev/null
@@ -1,19 +0,0 @@
-[[inputs.apache]]
-  ## An array of URLs to gather from, must be directed at the machine
-  ## readable version of the mod_status page including the auto query string.
-  ## Default is "http://localhost/server-status?auto".
-  urls = ["http://localhost:8890/server-status?auto"]
-
-  ## Credentials for basic HTTP authentication.
-  # username = "myuser"
-  # password = "mypassword"
-
-  ## Maximum time to receive response.
-  # response_timeout = "5s"
-
-  ## Optional SSL Config
-  # ssl_ca = "/etc/telegraf/ca.pem"
-  # ssl_cert = "/etc/telegraf/cert.pem"
-  # ssl_key = "/etc/telegraf/key.pem"
-  ## Use SSL but skip chain & host verification
-  # insecure_skip_verify = false
\ No newline at end of file
diff --git a/test/services/ffmpeg/install.sh b/test/services/ffmpeg/install.sh
deleted file mode 100755
index 02d6e1e3d7394a636b80731ac0c3d8aa5d295e1e..0000000000000000000000000000000000000000
--- a/test/services/ffmpeg/install.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/bash
-
-apt-get update
-apt-get -y install zip python
-
-wget http://zebulon.bok.net/Bento4/binaries/Bento4-SDK-1-5-1-621.x86_64-unknown-linux.zip
-unzip Bento4-SDK-1-5-1-621.x86_64-unknown-linux.zip
-
-mv Bento4-SDK-1-5-1-621.x86_64-unknown-linux /opt/ 
-rm Bento4-SDK-1-5-1-621.x86_64-unknown-linux.zip
-
-add-apt-repository -y ppa:jonathonf/ffmpeg-3
-apt-get update && apt -y install ffmpeg libav-tools x264 x265
-
-ffmpeg -version
diff --git a/test/services/ffmpeg/telegraf_ffmpeg.conf b/test/services/ffmpeg/telegraf_ffmpeg.conf
deleted file mode 100644
index efe72dc2d05e785c1615f1b6c55794294f630be2..0000000000000000000000000000000000000000
--- a/test/services/ffmpeg/telegraf_ffmpeg.conf
+++ /dev/null
@@ -1,15 +0,0 @@
-# # Influx HTTP write listener
-[[inputs.http_listener]]
-  ## Address and port to host HTTP listener on
-  service_address = ":8186"
-
-  ## timeouts
-  read_timeout = "10s"
-  write_timeout = "10s"
-
-  ## HTTPS
-  #tls_cert= "/etc/telegraf/cert.pem"
-  #tls_key = "/etc/telegraf/key.pem"
-
-  ## MTLS
-  #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
\ No newline at end of file
diff --git a/test/services/ffmpeg/transcode.sh b/test/services/ffmpeg/transcode.sh
deleted file mode 100755
index 877472b2be6f55f5ec8a60d9f2b3e41ba57ae301..0000000000000000000000000000000000000000
--- a/test/services/ffmpeg/transcode.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-#
-# Bento4 must be manually obtained from https://www.bento4.com/downloads/ ffmpeg can be installed as follows 
-# (Debian - http://www.deb-multimedia.org/) ~$ 
-# sudo echo deb http://www.deb-multimedia.org jessie main non-free >> /etc/apt/sources.list
-# ~$ sudo apt update ~$ sudo apt install deb-multimedia-keyring ~$ sudo apt update && sudo apt-get dist-upgrade
-#
-# First encode the video to 24fps!!! and MP4 (h.264)
-#
-# Video MP4 file
-INPUT=$1
-OUTPUT_iFRAMES="$1-iFrames.mp4"
-OUTPUT_FRAGMENTED="$OUTPUT_iFRAMES-Fragmented.mp4"
-
-OUTPUT_FOLDER_NAME=$(date +%Y%m%d%H%M%S)$1
-echo "OUTPUT_FOLDER_NAME: "$OUTPUT_FOLDER_NAME
-
-rm -rf $OUTPUT_FOLDER_NAME
-mkdir $OUTPUT_FOLDER_NAME
-
-# Insert Correct number of I frames
-#ffmpeg -y -i $INPUT -profile:v baseline -level 3.0 -c:a libfdk_aac -ac 2 -ab 128k -c:v libx264 -x264opts 'keyint=24:min-keyint=24:no-scenecut' -b:v 400k -maxrate 400k -bufsize 1000k -vf "scale=-1:360" $OUTPUT_iFRAMES
-
-ffmpeg -y -i $INPUT -profile:v baseline -level 3.0 -c:a aac -ac 2 -ab 128k -c:v libx264 -x264opts 'keyint=24:min-keyint=24:no-scenecut' -b:v 400k -maxrate 400k -bufsize 1000k -vf "scale=-1:360" -strict experimental $OUTPUT_FOLDER_NAME"/"$OUTPUT_iFRAMES
-
-# fragment MP4
-/opt/Bento4-SDK-1-5-1-621.x86_64-unknown-linux/bin/mp4fragment --timescale 1000 $OUTPUT_FOLDER_NAME"/"$OUTPUT_iFRAMES $OUTPUT_FOLDER_NAME"/"$OUTPUT_FRAGMENTED
-
-# Option 1 with Bento4
-/opt/Bento4-SDK-1-5-1-621.x86_64-unknown-linux/bin/mp4dash --mpd-name=stream.mpd --use-segment-list --use-compat-namespace -o $OUTPUT_FOLDER_NAME"/"$OUTPUT_FOLDER_NAME $OUTPUT_FOLDER_NAME"/"$OUTPUT_FRAGMENTED 
-
-cd $OUTPUT_FOLDER_NAME
-tar -cvzf $OUTPUT_FOLDER_NAME".gz" $OUTPUT_FOLDER_NAME
-
diff --git a/test/services/host/install.sh b/test/services/host/install.sh
deleted file mode 100644
index 83cc525f8235c693f9e3a327923b3dee4287178a..0000000000000000000000000000000000000000
--- a/test/services/host/install.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-#/////////////////////////////////////////////////////////////////////////
-#//
-#// (c) University of Southampton IT Innovation Centre, 2017
-#//
-#// Copyright in this software belongs to University of Southampton
-#// IT Innovation Centre of Gamma House, Enterprise Road,
-#// Chilworth Science Park, Southampton, SO16 7NS, UK.
-#//
-#// This software may not be used, sold, licensed, transferred, copied
-#// or reproduced in whole or in part in any manner or form or in or
-#// on any media by any person other than in accordance with the terms
-#// of the Licence Agreement supplied with the software, or otherwise
-#// without the prior written consent of the copyright owners.
-#//
-#// This software is distributed WITHOUT ANY WARRANTY, without even the
-#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-#// PURPOSE, except where stated in the Licence Agreement supplied with
-#// the software.
-#//
-#//      Created By :            Michael Boniface
-#//      Created Date :          23/01/2018
-#//      Created for Project :   FLAME
-#//
-#/////////////////////////////////////////////////////////////////////////
-
-# Install host 
-# This is a dummy script as the endpoint is driven by simulation
\ No newline at end of file
diff --git a/test/services/host/telegraf_host.conf b/test/services/host/telegraf_host.conf
deleted file mode 100644
index 1fdd33a7b4c8eef188469cd9c94bcef746351ed4..0000000000000000000000000000000000000000
--- a/test/services/host/telegraf_host.conf
+++ /dev/null
@@ -1,80 +0,0 @@
-
-###############################################################################
-#                                  INPUTS                                     #
-###############################################################################
-# # Read metrics about network interface usage
- [[inputs.net]]
-#   ## By default, telegraf gathers stats from any up interface (excluding loopback)
-#   ## Setting interfaces will tell it to gather these explicit interfaces,
-#   ## regardless of status.
-#   ##
-#   # interfaces = ["eth0"]
-
-# Read metrics about cpu usage
-[[inputs.cpu]]
-  ## Whether to report per-cpu stats or not
-  percpu = true
-  ## Whether to report total system cpu stats or not
-  totalcpu = true
-  ## If true, collect raw CPU time metrics.
-  collect_cpu_time = false
-  ## If true, compute and report the sum of all non-idle CPU states.
- #report_active = false
-
-
-# Read metrics about disk usage by mount point
-[[inputs.disk]]
-  ## By default, telegraf gather stats for all mountpoints.
-  ## Setting mountpoints will restrict the stats to the specified mountpoints.
-  # mount_points = ["/"]
-
-  ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
-  ## present on /run, /var/run, /dev/shm or /dev).
-  ignore_fs = ["tmpfs", "devtmpfs", "devfs"]
-
-
-# Read metrics about disk IO by device
-[[inputs.diskio]]
-  ## By default, telegraf will gather stats for all devices including
-  ## disk partitions.
-  ## Setting devices will restrict the stats to the specified devices.
-  # devices = ["sda", "sdb"]
-  ## Uncomment the following line if you need disk serial numbers.
-  # skip_serial_number = false
-  #
-  ## On systems which support it, device metadata can be added in the form of
-  ## tags.
-  ## Currently only Linux is supported via udev properties. You can view
-  ## available properties for a device by running:
-  ## 'udevadm info -q property -n /dev/sda'
-  # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
-  #
-  ## Using the same metadata source as device_tags, you can also customize the
-  ## name of the device via templates.
-  ## The 'name_templates' parameter is a list of templates to try and apply to
-  ## the device. The template may contain variables in the form of '$PROPERTY' or
-  ## '${PROPERTY}'. The first template which does not contain any variables not
-  ## present for the device is used as the device name tag.
-  ## The typical use case is for LVM volumes, to get the VG/LV name instead of
-  ## the near-meaningless DM-0 name.
-  # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
-
-# Read metrics about memory usage
-[[inputs.mem]]
-  # no configuration
-
-# # Influx HTTP write listener
-[[inputs.http_listener]]
-  ## Address and port to host HTTP listener on
-  service_address = ":8186"
-
-  ## timeouts
-  read_timeout = "10s"
-  write_timeout = "10s"
-
-  ## HTTPS
-  #tls_cert= "/etc/telegraf/cert.pem"
-  #tls_key = "/etc/telegraf/key.pem"
-
-  ## MTLS
-  #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
\ No newline at end of file
diff --git a/test/services/ipendpoint/install.sh b/test/services/ipendpoint/install.sh
deleted file mode 100755
index 7cc5c340a91da881e4ea51765438c2d02346c100..0000000000000000000000000000000000000000
--- a/test/services/ipendpoint/install.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-#/////////////////////////////////////////////////////////////////////////
-#//
-#// (c) University of Southampton IT Innovation Centre, 2017
-#//
-#// Copyright in this software belongs to University of Southampton
-#// IT Innovation Centre of Gamma House, Enterprise Road,
-#// Chilworth Science Park, Southampton, SO16 7NS, UK.
-#//
-#// This software may not be used, sold, licensed, transferred, copied
-#// or reproduced in whole or in part in any manner or form or in or
-#// on any media by any person other than in accordance with the terms
-#// of the Licence Agreement supplied with the software, or otherwise
-#// without the prior written consent of the copyright owners.
-#//
-#// This software is distributed WITHOUT ANY WARRANTY, without even the
-#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-#// PURPOSE, except where stated in the Licence Agreement supplied with
-#// the software.
-#//
-#//      Created By :            Michael Boniface
-#//      Created Date :          23/01/2018
-#//      Created for Project :   FLAME
-#//
-#/////////////////////////////////////////////////////////////////////////
-
-# Install ipendpoint
-# This is a dummy script as the endpoint is driven by simulation
\ No newline at end of file
diff --git a/test/services/ipendpoint/telegraf_ipendpoint.conf b/test/services/ipendpoint/telegraf_ipendpoint.conf
deleted file mode 100644
index efe72dc2d05e785c1615f1b6c55794294f630be2..0000000000000000000000000000000000000000
--- a/test/services/ipendpoint/telegraf_ipendpoint.conf
+++ /dev/null
@@ -1,15 +0,0 @@
-# # Influx HTTP write listener
-[[inputs.http_listener]]
-  ## Address and port to host HTTP listener on
-  service_address = ":8186"
-
-  ## timeouts
-  read_timeout = "10s"
-  write_timeout = "10s"
-
-  ## HTTPS
-  #tls_cert= "/etc/telegraf/cert.pem"
-  #tls_key = "/etc/telegraf/key.pem"
-
-  ## MTLS
-  #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
\ No newline at end of file
diff --git a/test/services/loadtest-streaming/install.sh b/test/services/loadtest-streaming/install.sh
deleted file mode 100755
index 7d6ef6ddc357283c53da5f0475a517002c685f14..0000000000000000000000000000000000000000
--- a/test/services/loadtest-streaming/install.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-#/////////////////////////////////////////////////////////////////////////
-#//
-#// (c) University of Southampton IT Innovation Centre, 2017
-#//
-#// Copyright in this software belongs to University of Southampton
-#// IT Innovation Centre of Gamma House, Enterprise Road,
-#// Chilworth Science Park, Southampton, SO16 7NS, UK.
-#//
-#// This software may not be used, sold, licensed, transferred, copied
-#// or reproduced in whole or in part in any manner or form or in or
-#// on any media by any person other than in accordance with the terms
-#// of the Licence Agreement supplied with the software, or otherwise
-#// without the prior written consent of the copyright owners.
-#//
-#// This software is distributed WITHOUT ANY WARRANTY, without even the
-#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-#// PURPOSE, except where stated in the Licence Agreement supplied with
-#// the software.
-#//
-#//      Created By :            Michael Boniface
-#//      Created Date :          14/02/2018
-#//      Created for Project :   FLAME
-#//
-#/////////////////////////////////////////////////////////////////////////
-set -euo pipefail
-
-echo "REPO_ROOT:"$REPO_ROOT
-eval '$REPO_ROOT/test/services/vlc/install.sh'
-eval '$REPO_ROOT/test/services/pytest/install.sh'
\ No newline at end of file
diff --git a/test/services/loadtest-streaming/telegraf_loadtest_streaming.conf b/test/services/loadtest-streaming/telegraf_loadtest_streaming.conf
deleted file mode 100644
index 3e30465f39ca7035ec8217909c9bc5d29942fa4e..0000000000000000000000000000000000000000
--- a/test/services/loadtest-streaming/telegraf_loadtest_streaming.conf
+++ /dev/null
@@ -1,112 +0,0 @@
-# Telegraf configuration
-
-# Telegraf is entirely plugin driven. All metrics are gathered from the
-# declared inputs, and sent to the declared outputs.
-
-# Plugins must be declared in here to be active.
-# To deactivate a plugin, comment out the name and any variables.
-
-# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
-# file would generate.
-
-# Global tags can be specified here in key="value" format.
-[global_tags]
-  # location of the data centre
-  location="{{LOCATION}}"
-  # media service template id
-  sfc="{{SFC_ID}}"
-  # media service instance
-  sfc_i="{{SFC_ID_INSTANCE}}"
-  # service function type
-  sf="{{SF_ID}}"
-  # service function instance id
-  sf_i="{{SF_ID_INSTANCE}}"
-  # ipendpoint id aka surrogate instance
-  ipendpoint="{{IP_ENDPOINT_ID}}"
-
-# Configuration for telegraf agent
-[agent]
-  ## Default data collection interval for all inputs
-  interval = "10s"
-  ## Rounds collection interval to 'interval'
-  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
-  round_interval = true
-
-  ## Telegraf will cache metric_buffer_limit metrics for each output, and will
-  ## flush this buffer on a successful write.
-  metric_buffer_limit = 1000
-  ## Flush the buffer whenever full, regardless of flush_interval.
-  flush_buffer_when_full = true
-
-  ## Collection jitter is used to jitter the collection by a random amount.
-  ## Each plugin will sleep for a random time within jitter before collecting.
-  ## This can be used to avoid many plugins querying things like sysfs at the
-  ## same time, which can have a measurable effect on the system.
-  collection_jitter = "0s"
-
-  ## Default flushing interval for all outputs. You shouldn't set this below
-  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
-  flush_interval = "10s"
-  ## Jitter the flush interval by a random amount. This is primarily to avoid
-  ## large write spikes for users running a large number of telegraf instances.
-  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
-  flush_jitter = "0s"
-
-  ## Logging configuration:
-  ## Run telegraf in debug mode
-  debug = false
-  ## Run telegraf in quiet mode
-  quiet = false
-  ## Specify the log file name. The empty string means to log to stdout.
-  logfile = "/var/log/telegraf/telegraf.log"
-
-  ## Override default hostname, if empty use os.Hostname()
-  hostname = ""
-
-
-###############################################################################
-#                                  OUTPUTS                                    #
-###############################################################################
-
-# Configuration for influxdb server to send metrics to
-[[outputs.influxdb]]
-  # The full HTTP or UDP endpoint URL for your InfluxDB instance.
-  # Multiple urls can be specified but it is assumed that they are part of the same
-  # cluster, this means that only ONE of the urls will be written to each interval.
-  # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
-  urls = ["{{INFLUXDB_URL}}"] # required
-  # The target database for metrics (telegraf will create it if not exists)
-  database = "{{DATABASE_NAME}}" # required
-  # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
-  # note: using second precision greatly helps InfluxDB compression
-  precision = "s"
-
-  ## Write timeout (for the InfluxDB client), formatted as a string.
-  ## If not provided, will default to 5s. 0s means no timeout (not recommended).
-  timeout = "5s"
-  # username = "telegraf"
-  # password = "metricsmetricsmetricsmetrics"
-  # Set the user agent for HTTP POSTs (can be useful for log differentiation)
-  # user_agent = "telegraf"
-  # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
-  # udp_payload = 512
-
-
-###############################################################################
-#                                  INPUTS                                     #
-###############################################################################
-# # Influx HTTP write listener
-[[inputs.http_listener]]
-  ## Address and port to host HTTP listener on
-  service_address = ":8186"
-
-  ## timeouts
-  read_timeout = "10s"
-  write_timeout = "10s"
-
-  ## HTTPS
-  #tls_cert= "/etc/telegraf/cert.pem"
-  #tls_key = "/etc/telegraf/key.pem"
-
-  ## MTLS
-  #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
\ No newline at end of file
diff --git a/test/services/mongo/install.sh b/test/services/mongo/install.sh
deleted file mode 100755
index e00502fe6746d41101372afe1f88a7ffcec11a1a..0000000000000000000000000000000000000000
--- a/test/services/mongo/install.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-#/////////////////////////////////////////////////////////////////////////
-#//
-#// (c) University of Southampton IT Innovation Centre, 2017
-#//
-#// Copyright in this software belongs to University of Southampton
-#// IT Innovation Centre of Gamma House, Enterprise Road,
-#// Chilworth Science Park, Southampton, SO16 7NS, UK.
-#//
-#// This software may not be used, sold, licensed, transferred, copied
-#// or reproduced in whole or in part in any manner or form or in or
-#// on any media by any person other than in accordance with the terms
-#// of the Licence Agreement supplied with the software, or otherwise
-#// without the prior written consent of the copyright owners.
-#//
-#// This software is distributed WITHOUT ANY WARRANTY, without even the
-#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-#// PURPOSE, except where stated in the Licence Agreement supplied with
-#// the software.
-#//
-#//      Created By :            Michael Boniface
-#//      Created Date :          23/01/2018
-#//      Created for Project :   FLAME
-#//
-#/////////////////////////////////////////////////////////////////////////
-
-# Install apache
-sudo apt-get update
-sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 2930ADAE8CAF5059EE73BB4B58712A2291FA4AD5
-echo "deb [ arch=amd64,arm64 ] https://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.6 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.6.list
-sudo apt-get update
-sudo apt-get install -y mongodb-org
-sudo service mongod start
\ No newline at end of file
diff --git a/test/services/mongo/telegraf_mongo.conf b/test/services/mongo/telegraf_mongo.conf
deleted file mode 100644
index 80a6a6964394ca33a50e1389121ea142e273bcc2..0000000000000000000000000000000000000000
--- a/test/services/mongo/telegraf_mongo.conf
+++ /dev/null
@@ -1,15 +0,0 @@
-[[inputs.mongodb]]
-  ## An array of URLs of the form:
-  ##   "mongodb://" [user ":" pass "@"] host [ ":" port]
-  ## For example:
-  ##   mongodb://user:auth_key@10.10.3.30:27017,
-  ##   mongodb://10.10.3.33:18832,
-  servers = ["mongodb://127.0.0.1:27017"]
-  gather_perdb_stats = false
-
-  ## Optional SSL Config
-  # ssl_ca = "/etc/telegraf/ca.pem"
-  # ssl_cert = "/etc/telegraf/cert.pem"
-  # ssl_key = "/etc/telegraf/key.pem"
-  ## Use SSL but skip chain & host verification
-  # insecure_skip_verify = false
\ No newline at end of file
diff --git a/test/services/nginx/install.sh b/test/services/nginx/install.sh
deleted file mode 100755
index fa0b75253e50c3b0486a6067b1b4f0d0cd68bcc1..0000000000000000000000000000000000000000
--- a/test/services/nginx/install.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-#/////////////////////////////////////////////////////////////////////////
-#//
-#// (c) University of Southampton IT Innovation Centre, 2017
-#//
-#// Copyright in this software belongs to University of Southampton
-#// IT Innovation Centre of Gamma House, Enterprise Road,
-#// Chilworth Science Park, Southampton, SO16 7NS, UK.
-#//
-#// This software may not be used, sold, licensed, transferred, copied
-#// or reproduced in whole or in part in any manner or form or in or
-#// on any media by any person other than in accordance with the terms
-#// of the Licence Agreement supplied with the software, or otherwise
-#// without the prior written consent of the copyright owners.
-#//
-#// This software is distributed WITHOUT ANY WARRANTY, without even the
-#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-#// PURPOSE, except where stated in the Licence Agreement supplied with
-#// the software.
-#//
-#//      Created By :            Michael Boniface
-#//      Created Date :          01/02/2018
-#//      Created for Project :   FLAME
-#//
-#/////////////////////////////////////////////////////////////////////////
-
-# Install nginx
-apt-get update
-yes Y | apt-get install nginx 
-
-# Need to set up basic stats as this not configured by default
-# http://nginx.org/en/docs/http/ngx_http_stub_status_module.html
-cp -rf $REPO_ROOT/test/services/nginx/nginx.conf /etc/nginx/nginx.conf
-nginx -s reload
-systemctl start nginx
\ No newline at end of file
diff --git a/test/services/nginx/nginx.conf b/test/services/nginx/nginx.conf
deleted file mode 100644
index 1c906bec4096ecd77a0eb8d46c091288841b92d5..0000000000000000000000000000000000000000
--- a/test/services/nginx/nginx.conf
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-events {
-  worker_connections  4096;  ## Default: 1024
-}
-http {
-	server {
-		location /nginx_status {
-		  stub_status on;
-		  access_log   off;
-		  allow all;
-	  }
-	}
-}
\ No newline at end of file
diff --git a/test/services/nginx/telegraf_nginx.conf b/test/services/nginx/telegraf_nginx.conf
deleted file mode 100644
index c91cdeb3265bf703a6438456d12942956938c96d..0000000000000000000000000000000000000000
--- a/test/services/nginx/telegraf_nginx.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-# Read Nginx's basic status information (ngx_http_stub_status_module)
-[[inputs.nginx]]
-  ## An array of Nginx stub_status URI to gather stats.
-  urls = ["http://localhost:80/nginx_status"]
-
-  ## HTTP response timeout (default: 5s)
-#  response_timeout = "5s"
\ No newline at end of file
diff --git a/test/services/pytest/install.sh b/test/services/pytest/install.sh
deleted file mode 100644
index ce998ad1deef418d05a721cce5cc20f3aa3bd098..0000000000000000000000000000000000000000
--- a/test/services/pytest/install.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-#/////////////////////////////////////////////////////////////////////////
-#//
-#// (c) University of Southampton IT Innovation Centre, 2017
-#//
-#// Copyright in this software belongs to University of Southampton
-#// IT Innovation Centre of Gamma House, Enterprise Road,
-#// Chilworth Science Park, Southampton, SO16 7NS, UK.
-#//
-#// This software may not be used, sold, licensed, transferred, copied
-#// or reproduced in whole or in part in any manner or form or in or
-#// on any media by any person other than in accordance with the terms
-#// of the Licence Agreement supplied with the software, or otherwise
-#// without the prior written consent of the copyright owners.
-#//
-#// This software is distributed WITHOUT ANY WARRANTY, without even the
-#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-#// PURPOSE, except where stated in the Licence Agreement supplied with
-#// the software.
-#//
-#//      Created By :            Michael Boniface
-#//      Created Date :          24/02/2018
-#//      Created for Project :   FLAME
-#//
-#/////////////////////////////////////////////////////////////////////////
-apt-get update
-apt-get -y install python3 python3-pip python-influxdb
-update-alternatives --install /usr/bin/python python /usr/bin/python3 10
-pip3 install pytest pyyaml
-pip3 install --upgrade influxdb
diff --git a/test/services/vlc/install.sh b/test/services/vlc/install.sh
deleted file mode 100755
index 02e3b8fe01696913d8e5b65967a152bfb485eee3..0000000000000000000000000000000000000000
--- a/test/services/vlc/install.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-#/////////////////////////////////////////////////////////////////////////
-#//
-#// (c) University of Southampton IT Innovation Centre, 2017
-#//
-#// Copyright in this software belongs to University of Southampton
-#// IT Innovation Centre of Gamma House, Enterprise Road,
-#// Chilworth Science Park, Southampton, SO16 7NS, UK.
-#//
-#// This software may not be used, sold, licensed, transferred, copied
-#// or reproduced in whole or in part in any manner or form or in or
-#// on any media by any person other than in accordance with the terms
-#// of the Licence Agreement supplied with the software, or otherwise
-#// without the prior written consent of the copyright owners.
-#//
-#// This software is distributed WITHOUT ANY WARRANTY, without even the
-#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-#// PURPOSE, except where stated in the Licence Agreement supplied with
-#// the software.
-#//
-#//      Created By :            Michael Boniface
-#//      Created Date :          12/02/2018
-#//      Created for Project :   FLAME
-#//
-#/////////////////////////////////////////////////////////////////////////
-
-add-apt-repository -y ppa:videolan/master-daily
-apt-get update
-apt-get -y install vlc
\ No newline at end of file
diff --git a/test/streaming-sim/LineProtocolGenerator.py b/test/streaming-sim/LineProtocolGenerator.py
deleted file mode 100644
index 1b19c3c50717b1d09f2db9fcac742fd77d0153a9..0000000000000000000000000000000000000000
--- a/test/streaming-sim/LineProtocolGenerator.py
+++ /dev/null
@@ -1,300 +0,0 @@
-#!/usr/bin/python3
-
-# line protocol
-
-# Method to create a full InfluxDB request statement (based on partial statement from client)
-import uuid
-from random import randint
-
-
-# Reports TX and RX, scaling on requested quality
-def generate_network_report(recieved_bytes, sent_bytes, time):
-    result = [{"measurement": "net_port_io",
-               "tags": {
-                   "port_id": "enps03"
-               },
-               "fields": {
-                   "RX_BYTES_PORT_M": recieved_bytes,
-                   "TX_BYTES_PORT_M": sent_bytes
-               },
-               "time": _getNSTime(time)
-               }]
-
-    return result
-
-
-# Formats VM config
-def generate_vm_config(state, cpu, mem, storage, time):
-    result = [{"measurement": "vm_res_alloc",
-               "tags": {
-                   "vm_state": state
-               },
-               "fields": {
-                   "cpu": cpu,
-                   "memory": mem,
-                   "storage": storage
-               },
-               "time": _getNSTime(time)
-               }]
-
-    return result
-
-
-# Reports cpu usage, scaling on requests
-def generate_cpu_report(cpu_usage, cpu_active_time, cpu_idle_time, time):
-    result = [{"measurement": "cpu_usage",
-              "fields": {
-                  "cpu_usage": cpu_usage,
-                  "cpu_active_time": cpu_active_time,
-                  "cpu_idle_time": cpu_idle_time
-              },
-              "time": _getNSTime(time)
-              }]
-
-    return result
-
-
-# Reports response times, scaling on number of requests
-def generate_mpegdash_report(resource, requests, avg_response_time, peak_response_time, time):
-    result = [{"measurement": "mpegdash_service",
-               "tags": {
-                   "cont_nav": resource
-               },
-               "fields": {
-                   "requests": requests,
-                   "avg_response_time": avg_response_time,
-                   "peak_response_time": peak_response_time
-               },
-               "time": _getNSTime(time)
-               }]
-
-    return result
-
-
-# ipendpoint_route,ipendpoint_id,cont_nav=FQDN HTTP_REQUESTS_FQDN_M, NETWORK_FQDN_LATENCY timestamp
-def generate_ipendpoint_route(resource, requests, latency, time):
-    result = [{"measurement": "ipendpoint_route",
-               "tags": {
-                   "cont_nav": str(resource)
-               },
-               "fields": {
-                   "http_requests_fqdn_m": requests,
-                   "network_fqdn_latency": latency
-               },
-               "time": _getNSTime(time)
-               }]
-
-    return result
-
-
-# InfluxDB likes to have time-stamps in nanoseconds
-def _getNSTime(time):
-    # Convert to nano-seconds
-    timestamp = int(1000000000*time)
-
-    return timestamp
-
-
-# DEPRECATED
-# ____________________________________________________________________________
-
-# DEPRECATED: old structure, not part of new spec
-
-# Influx needs strings to be quoted, this provides a utility interface to do this
-def quote_wrap(string):
-    return "\"" + string + "\""
-
-
-def _generateClientRequest(cReq, id, time):
-    # Tags first
-    result = 'sid="' + str(id) + '",' + cReq
-
-    # Fields
-    # No additional fields here yet
-
-    # Timestamp
-    result += ' ' + str(_getNSTime(time))
-
-    # Measurement
-    return 'request,' + result
-
-
-# Method to create a full InfluxDB response statement
-# DEPRECATED: old structure, not part of new spec
-def _generateServerResponse(reqID, quality, time, cpuUsage, qualityDifference):
-    # Tags first
-    result = ' '
-
-    # Fields
-    result += 'quality=' + str(quality) + ','
-    result += 'cpuUsage=' + str(cpuUsage) + ','
-    result += 'qualityDifference=' + str(qualityDifference) + ','
-    result += 'requestID="' + str(reqID) + '",'
-    result += 'index="' + str(uuid.uuid4()) + '"'
-
-    # Timestamp
-    result += ' ' + str(_getNSTime(time))
-
-    # Measurement
-    # print('response'+result)
-    return 'response' + result
-
-
-# Formats server config
-def _generateServerConfig(ID, location, cpu, mem, storage, time):
-    # metric
-    result = 'host_resource'
-    # Tags
-    result += ',slice_id=' + quote_wrap(ID)
-    result += ',location=' + quote_wrap(location)
-    result += ' '
-    # Fields
-    result += 'cpu=' + str(cpu)
-    result += ',memory=' + quote_wrap(mem)
-    result += ',storage=' + quote_wrap(storage)
-
-    # Time
-    result += ' ' + str(_getNSTime(time))
-
-    print(result)
-    return result
-
-
-# Format port config
-def _configure_port(port_id, state, rate, time):
-    # metric
-    result = 'net_port_config '
-    # Fields
-    result += 'port_id=' + quote_wrap('enps' + port_id)
-    result += ',port_state=' + quote_wrap(state)
-    result += ',tx_constraint=' + quote_wrap(rate)
-    result += ' '
-
-    # Time
-    result += ' ' + str(_getNSTime(time))
-
-    print(result)
-    return result
-
-
-# Format service function config
-def _configure_service_function(state, max_connected_clients):
-    # measurement
-    result = 'mpegdash_service_config'
-    # tags
-    result += ',service_state='+quote_wrap(state)
-    result += ' '
-    # fields
-    result += 'max_connected_clients='+str(max_connected_clients)
-
-    return result
-
-
-# Reports memory usage, scaling on requests
-def generate_mem_report(requests, total_mem, time):
-    # Measurement
-    result = 'mem'
-    result += ' '
-    # field
-    used = randint(0, min(100, 5*requests))
-    available = 100-used
-    result += 'available_percent='+str(available)
-    result += ',used_percent='+str(used)
-    result += ',total='+str(total_mem)
-    result += ' '
-    # Time
-    result += str(_getNSTime(time))
-    print(result)
-    return result
-
-
-# Formats compute node config
-def generate_compute_node_config(slice_id, location, node_id, cpus, mem, storage, time):
-    # Measurement
-    result = 'compute_node_config'
-    # CommonContext Tag
-    result += ',slide_id='+quote_wrap(slice_id)
-    # Tag
-    result += ',location='+quote_wrap(location)
-    result += ',comp_node_id='+quote_wrap(node_id)
-    result += ' '
-    # field
-    result += 'cpus='+str(cpus)
-    result += ',memory='+str(mem)
-    result += ',storage='+str(storage)
-    result += ' '
-    # Time
-    result += str(_getNSTime(time))
-    print(result)
-    return result
-
-
-# Formats network resource config
-def generate_network_resource_config(slice_id, network_id, bandwidth, time):
-    # Measurement
-    result = 'network_resource_config'
-    # Meta Tag
-    result += ',slice_id='+quote_wrap(slice_id)
-    # Tag
-    result += 'network_id='+quote_wrap(network_id)
-    result += ' '
-    # field
-    result += 'bandwidth='+str(bandwidth)
-    result += ' '
-    # Time
-    result += str(_getNSTime(time))
-    print(result)
-    return result
-
-
-# Formats network interface config
-def generate_network_interface_config(slice_id, comp_node_id, port_id, rx_constraint, tx_constraint, time):
-    # Measurement
-    result = 'network_interface_config'
-    # Meta Tag
-    result += ',slice_id'+quote_wrap(slice_id)
-    # Tags
-    result += ',comp_node_id='+quote_wrap(comp_node_id)
-    result += ',port_id='+quote_wrap(port_id)
-    result += ' '
-    # field
-    result += 'rx_constraint='+str(rx_constraint)
-    result += ',tx_constraint='+str(tx_constraint)
-    result += ' '
-    # Time
-    result += str(_getNSTime(time))
-    print(result)
-    return result
-
-
-# Format SF instance config
-def generate_sf_instance_surrogate_config(loc, sfc, sfc_i, sf_package, sf_i, cpus, mem, storage, time):
-    # Measurement
-    result = 'sf_instance_surrogate_config'
-    # Meta Tag
-    result += ',location'+quote_wrap(loc)
-    result += ',sfc'+quote_wrap(sfc)
-    result += ',sfc_i'+quote_wrap(sfc_i)
-    result += ',sf_package'+quote_wrap(sf_package)
-    result += ',sf_i'+quote_wrap(sf_i)
-    result += ' '
-    # field
-    result += 'cpus='+str(cpus)
-    result += ',memory='+str(mem)
-    result += ',storage='+str(storage)
-    result += ' '
-    # Time
-    result += str(_getNSTime(time))
-    print(result)
-    return result
-
-
-# Formats context container as part of other line protocol generators
-def service_function_measurement(measurement, service_function_context):
-    result = measurement
-    result += ',sfc'+quote_wrap(service_function_context.sfc)
-    result += ',sfc_i'+quote_wrap(service_function_context.sfc_i)
-    result += ',sf_package'+quote_wrap(service_function_context.sf_package)
-    result += ',sf_i'+quote_wrap(service_function_context.sf_i)
-
-    return result
diff --git a/test/streaming-sim/StreamingSim.py b/test/streaming-sim/StreamingSim.py
deleted file mode 100644
index cb66ccb72d01a783d918b73d88bdc48af9db249f..0000000000000000000000000000000000000000
--- a/test/streaming-sim/StreamingSim.py
+++ /dev/null
@@ -1,292 +0,0 @@
-#!/usr/bin/python3
-
-import LineProtocolGenerator as lp
-import time
-import urllib.parse
-import pytest
-import random
-import sys
-from influxdb import InfluxDBClient
-
-# Simulation parameters
-TICK_TIME = 1
-DEFAULT_REQUEST_RATE_INC = 1
-DEFAULT_REQUEST_RATE_INC_PERIOD = 10
-SIMULATION_TIME_SEC = 60 * 60
-
-# CLMC parameters
-INFLUX_DB_URL = 'http://172.23.1.20:8086'
-INFLUX_DB_NAME = 'CLMCMetrics'
-AGENT1_URL = 'http://172.23.1.21:8186'
-AGENT2_URL = 'http://172.23.1.22:8186'
-
-
-class Sim(object):
-    """
-    Simulator for services
-    """
-
-    def __init__(self, influx_url, influx_db_name, agent1_url, agent2_url):
-        """
-        Sets up the simulator object
-
-        :param influx_url: the influx DB url
-        :param influx_db_name: the influx DB name
-        """
-
-        self.influx_db_name = influx_db_name
-        self.agent1_url = agent1_url       
-        self.agent2_url = agent2_url          
-
-        # influx db client is created on initialisation, which will handle the influx DB queries
-        url_object = urllib.parse.urlparse(influx_url)
-        self.db_client = InfluxDBClient(host=url_object.hostname, port=url_object.port, database=self.influx_db_name, timeout=10)
-
-    def reset(self):
-        """
-        Resets the influx db by deleting the old database and creating a new one
-        """
-
-        # Teardown DB from previous sim and bring it back up
-        self.db_client.drop_database(self.influx_db_name)
-        self.db_client.create_database(self.influx_db_name)
-
-    def run(self, simulation_length_seconds):
-        """
-        Runs the simulation
-
-        :param simulation_length_seconds: length of simulation
-        """
-
-        start_time = time.time() - SIMULATION_TIME_SEC
-        sim_time = start_time
-
-        # segment_size : the length of video requested at a time
-        # bit_rate: MPEG-2 High 1080p 25fps = 80Mbps
-        ip_endpoints = [{'agent_url': self.agent1_url, 'location': 'DC1', 'cpu': 16,
-                         'mem': '8GB', 'storage': '1TB', 'request_queue': 0, 'request_arrival_rate': 0,
-                         'segment_size': 2, 'video_bit_rate': 80, 'packet_size': 1500},
-                        {'agent_url': self.agent2_url, 'location': 'DC2', 'cpu': 4,
-                         'mem': '8GB', 'storage': '1TB', 'request_queue': 0, 'request_arrival_rate': 0,
-                         'segment_size': 2, 'video_bit_rate': 80, 'packet_size': 1500}
-                        ]
-
-        # Simulate configuration of the ipendpoints
-        # endpoint state->mu, sigma, secs normal distribution
-        config_delay_dist = {"placing": [10, 0.68], "booting": [10, 0.68], "connecting": [10, 0.68]}
-
-        print("\nSimulation started. Generating data...")
-
-        # Place endpoints
-        max_delay = 0
-        for ip_endpoint in ip_endpoints:
-            agent_url = urllib.parse.urlparse(ip_endpoint["agent_url"])
-            agent_db_client = InfluxDBClient(host=agent_url.hostname, port=agent_url.port, database=self.influx_db_name, timeout=10)
-            delay_time = self._changeVMState(agent_db_client, sim_time, ip_endpoint, config_delay_dist['placing'][0],
-                                             config_delay_dist['placing'][0] * config_delay_dist['placing'][1],
-                                             'placing', 'placed')
-            max_delay = max(delay_time, max_delay)
-        sim_time += max_delay
-
-        # Boot endpoints
-        max_delay = 0
-        for ip_endpoint in ip_endpoints:
-            agent_url = urllib.parse.urlparse(ip_endpoint["agent_url"])
-            agent_db_client = InfluxDBClient(host=agent_url.hostname, port=agent_url.port, database=self.influx_db_name, timeout=10)
-            delay_time = self._changeVMState(agent_db_client, sim_time, ip_endpoint, config_delay_dist['booting'][0],
-                                             config_delay_dist['booting'][0] * config_delay_dist['booting'][1],
-                                             'booting', 'booted')
-            max_delay = max(delay_time, max_delay)
-        sim_time += max_delay
-
-        # Connect endpoints
-        max_delay = 0
-        for ip_endpoint in ip_endpoints:
-            agent_url = urllib.parse.urlparse(ip_endpoint["agent_url"])
-            agent_db_client = InfluxDBClient(host=agent_url.hostname, port=agent_url.port, database=self.influx_db_name, timeout=10)
-            delay_time = self._changeVMState(agent_db_client, sim_time, ip_endpoint, config_delay_dist['connecting'][0],
-                                             config_delay_dist['connecting'][0] * config_delay_dist['connecting'][1],
-                                             'connecting', 'connected')
-            max_delay = max(delay_time, max_delay)
-        sim_time += max_delay
-
-        request_arrival_rate_inc = DEFAULT_REQUEST_RATE_INC
-        inc_period_count = 0
-        for i in range(simulation_length_seconds):
-            # linear increase of the request arrival rate every DEFAULT_REQUEST_RATE_INC_PERIOD ticks;
-            # the counter is advanced once per tick (not per endpoint) so all endpoints scale together
-            if inc_period_count >= DEFAULT_REQUEST_RATE_INC_PERIOD:
-                for ip_endpoint in ip_endpoints:
-                    ip_endpoint['request_arrival_rate'] += request_arrival_rate_inc
-                inc_period_count = 0
-            else:
-                inc_period_count += 1
-
-            for ip_endpoint in ip_endpoints:
-                agent_url = urllib.parse.urlparse(ip_endpoint["agent_url"])
-                agent_db_client = InfluxDBClient(host=agent_url.hostname, port=agent_url.port, database=self.influx_db_name, timeout=10)
-
-                # add new requests to the queue
-                ip_endpoint['request_queue'] += ip_endpoint['request_arrival_rate']
-
-                # time (ms) to process one second of video in the current second
-                request_processing_time = int(random.normalvariate(10, 10 * 0.68))
-                # clamp to a minimum of 10 ms so that small or negative samples are discarded
-                request_processing_time = max(request_processing_time, 10)
-                # time depends on the length of the segments in seconds
-                request_processing_time *= ip_endpoint['segment_size']
-
-                # amount of cpu time (ms) per tick
-                cpu_time_available = ip_endpoint['cpu'] * TICK_TIME * 1000
-                max_requests_processed = int(cpu_time_available / request_processing_time)
-                # calc how many requests processed
-                if ip_endpoint['request_queue'] <= max_requests_processed:
-                    # processed all of the requests
-                    requests_processed = ip_endpoint['request_queue']
-                else:
-                    # processed the maximum number of requests
-                    requests_processed = max_requests_processed
-
-                # calculate cpu usage
-                cpu_active_time = int(requests_processed * request_processing_time)
-                cpu_idle_time = int(cpu_time_available - cpu_active_time)
-                cpu_usage = cpu_active_time / cpu_time_available
-                agent_db_client.write_points(lp.generate_cpu_report(cpu_usage, cpu_active_time, cpu_idle_time, sim_time))
-
-                # calc network usage metrics
-                bytes_rx = 2048 * requests_processed
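-                # bytes_tx: video_bit_rate (Mbps) * 1e6 / 8 gives bytes per second of video,
-                # multiplied by the seconds of video served (requests_processed * segment_size)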
-                bytes_tx = int(
-                    ip_endpoint['video_bit_rate'] / 8 * 1000000 * requests_processed * ip_endpoint['segment_size'])
-                agent_db_client.write_points(lp.generate_network_report(bytes_rx, bytes_tx, sim_time))
-
-                # time to process all of the requests in the queue
-                peak_response_time = ip_endpoint['request_queue'] * request_processing_time / ip_endpoint['cpu']
-                # average response time approximated as the mid-range between peak and single-request time
-                avg_response_time = (peak_response_time + request_processing_time) / 2
-                agent_db_client.write_points(lp.generate_mpegdash_report('http://localhost/server-status?auto', ip_endpoint['request_arrival_rate'],
-                                                                         avg_response_time, peak_response_time, sim_time))
-
-                # this should be calculated properly, but is fixed at 5 ms for now
-                network_request_delay = 0.005
-
-                # calculate network response delays (2km link, 100Mbps)
-                network_response_delay = self._calcNetworkDelay(2000, 100, ip_endpoint['packet_size'], ip_endpoint['video_bit_rate'])
-
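-                # end-to-end delay = request delay + service time + response delay;
-                # avg_response_time is in ms, hence the division by 1000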
-                e2e_delay = network_request_delay + (avg_response_time / 1000) + network_response_delay
-                agent_db_client.write_points(lp.generate_ipendpoint_route('http://localhost/server-status?auto', ip_endpoint['request_arrival_rate'], e2e_delay, sim_time))
-
-                # remove requests processed off the queue
-                ip_endpoint['request_queue'] -= int(requests_processed)
-
-            sim_time += TICK_TIME
-        end_time = sim_time
-        print("Simulation Finished. Start time {0}. End time {1}. Total time {2}".format(start_time, end_time,
-                                                                                         end_time - start_time))
-
-    @staticmethod
-    def _calcNetworkDelay(distance, bandwidth, packet_size, tx_video_bit_rate):
-        """
-        Calculates the network delay. Declared as static method since it doesn't need access to any instance variables.
-
-        :param distance: distance in metres
-        :param bandwidth: bandwidth in Mbps
-        :param packet_size: packet size in bytes
-        :param tx_video_bit_rate: transmitted video bit rate in Mbps
-        :return: the calculated network delay
-        """
-
-        # propagation delay = distance/speed (e.g. 2000 metres / 2*10^8 m/s for optical fibre)
-        propagation_delay = distance / (2 * 100000000)
-        # packetisation delay = ip packet size (bits) / tx rate (e.g. 100Mbps with 0% packet loss)
-        packetisation_delay = (packet_size * 8) / (bandwidth * 1000000)
-        # total number of packets to be sent
-        packets = (tx_video_bit_rate * 1000000) / (packet_size * 8)
-
-        response_delay = packets * (propagation_delay + packetisation_delay)
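-        # worked example with the simulation defaults used above (2000m link, 100Mbps,
-        # 1500B packets, 80Mbps video):
-        #   propagation_delay   = 2000 / 2e8         = 1.0e-05 s
-        #   packetisation_delay = (1500 * 8) / 1e8   = 1.2e-04 s
-        #   packets             = 8e7 / 12000        ~= 6667
-        #   response_delay      ~= 6667 * 1.3e-04    ~= 0.87 s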
-
-        return response_delay
-
-    @staticmethod
-    def _changeVMState(agent_db_client, sim_time, ip_endpoint, mu, sigma, transition_state, next_state):
-        """
-        Send influx data to change VM state. Declared as static method since it doesn't need access to any instance variables.
-
-        :param agent_db_client: the influx DB client of the endpoint's agent
-        :param sim_time: the current simulation time
-        :param ip_endpoint: the endpoint's configuration dictionary
-        :param mu: mean of the normally distributed transition delay (secs)
-        :param sigma: standard deviation of the transition delay
-        :param transition_state: the state reported while the transition is in progress
-        :param next_state: the state reported once the transition has completed
-        :return: the delay time
-        """
-
-        agent_db_client.write_points(lp.generate_vm_config(transition_state, ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], sim_time))
-
-        delay_time = random.normalvariate(mu, sigma)
-
-        agent_db_client.write_points(lp.generate_vm_config(next_state, ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], sim_time + delay_time))
-
-        return delay_time
-
-
-@pytest.fixture(scope='module')
-def run_simulation_fixture(streaming_sim_config):
-    """
-    A fixture which checks if the DB has been created; if not, it runs the simulator, followed by a 10-second timeout
-    """
-
-    influx_db_url = "http://" + streaming_sim_config['hosts'][0]['ip_address'] + ":8086"
-    agent1_url = "http://" + streaming_sim_config['hosts'][1]['ip_address'] + ":8186"
-    agent2_url = "http://" + streaming_sim_config['hosts'][2]['ip_address'] + ":8186"  
-
-    global INFLUX_DB_URL
-    global INFLUX_DB_NAME
-    global SIMULATION_TIME_SEC
-    global AGENT1_URL
-    global AGENT2_URL    
-
-    simulator = Sim(influx_db_url, INFLUX_DB_NAME, agent1_url, agent2_url)
-    dbs = simulator.db_client.get_list_database()
-    dbs = [db.get("name") for db in dbs]
-
-    # This check needed to be disabled as the CLMCMetrics database is always created when
-    # the test starts, irrespective of whether this is the 1st time or not
-#    if INFLUX_DB_NAME not in dbs:
-    simulator.reset()
-    simulator.run(SIMULATION_TIME_SEC)
-
-    print("10 seconds timeout is given so that the data could properly be inserted into the database.")
-    import time
-    time.sleep(10)
-
-
-def run_simulation(generate=True):
-    """
-    A method which runs the data generation simulator
-    :param generate: True to generate data, False to delete the DB (optional, defaults to True)
-    """
-
-    global INFLUX_DB_NAME
-    global INFLUX_DB_URL
-    global SIMULATION_TIME_SEC
-    global AGENT1_URL
-    global AGENT2_URL
-
-    simulator = Sim(INFLUX_DB_URL, INFLUX_DB_NAME, AGENT1_URL, AGENT2_URL)
-
-    if generate:
-        simulator.reset()
-        simulator.run(SIMULATION_TIME_SEC)
-    else:
-        simulator.db_client.drop_database(simulator.influx_db_name)
-
-
-if __name__ == "__main__":
-    """
-    The main entry point for this module. Code here is executed only when the StreamingSim.py file is run directly,
-    not when it is imported by another module
-    """
-
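-    # Example usage (illustrative):
-    #   python3 StreamingSim.py        -> generates the simulation data
-    #   python3 StreamingSim.py -c     -> clears the simulation database instead
-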
-    # check if there are any command line arguments given when executing the module
-    if len(sys.argv) > 1:
-        # if CLI argument '-c' is set when executing the script, the influx db will be deleted instead of generating data
-        option = str(sys.argv[1]) != "-c"
-        run_simulation(generate=option)
-    else:
-        # no argument was given on the command line, hence the default value True is used
-        run_simulation()
diff --git a/test/streaming-sim/__init__.py b/test/streaming-sim/__init__.py
deleted file mode 100644
index 44f772595799f5fe338534918c95e23e08e80464..0000000000000000000000000000000000000000
--- a/test/streaming-sim/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-#!/usr/bin/python3
\ No newline at end of file
diff --git a/test/streaming-sim/conftest.py b/test/streaming-sim/conftest.py
deleted file mode 100644
index a1b0c145cc890a2e3dca9a2bcca1ac954f8d55d5..0000000000000000000000000000000000000000
--- a/test/streaming-sim/conftest.py
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/usr/bin/python3
-
-import pytest
-import yaml
-from influxdb import InfluxDBClient
-
-
-@pytest.fixture(scope="module", params=[{'config': {'rspec': 'test/streaming-sim/rspec.yml'}}])
-def streaming_sim_config(request):
-    """
-    Reads the service configuration deployed for the streaming simulation test.
-
-    :param request: access the parameters of the fixture
-    :return: the python object representing the read YAML file
-    """
-
-    with open(request.param['config']['rspec'], 'r') as stream:
-        data_loaded = yaml.load(stream)
-    return data_loaded
-
-
-@pytest.fixture(params=[{'database': 'CLMCMetrics'}], scope='module')
-def get_db_client(streaming_sim_config, request):
-    """
-    Creates an Influx DB client for the CLMC metrics database
-
-    :param streaming_sim_config: the fixture returning the yaml configuration
-    :param request: access the parameters of the fixture
-    :return: the created Influx DB client
-    """
-
-    return InfluxDBClient(host=streaming_sim_config['hosts'][0]['ip_address'], port=8086, database=request.param['database'], timeout=10)
diff --git a/test/streaming-sim/rspec.yml b/test/streaming-sim/rspec.yml
deleted file mode 100644
index 97967dc66fda80f75d1bdbcd5a0013ed0dc446c8..0000000000000000000000000000000000000000
--- a/test/streaming-sim/rspec.yml
+++ /dev/null
@@ -1,52 +0,0 @@
-hosts:
-  - name: clmc-service
-    cpus: 1
-    memory: 2048
-    disk: "10GB"
-    forward_ports:
-      - guest: 8086
-        host: 8086
-      - guest: 8888
-        host: 8888
-      - guest: 9092
-        host: 9092
-    ip_address: "203.0.113.100"
-  - name: ipendpoint1
-    cpus: 1
-    memory: 2048
-    disk: "10GB"
-    service_name: "ipendpoint"
-    forward_ports:
-      - guest: 80
-        host: 8081
-    ip_address: "203.0.113.101"
-    location: "DC1"
-    sfc_id: "MS_Template_1"
-    sfc_id_instance: "MS_I1"
-    sf_id: "adaptive_streaming"
-    sf_id_instance: "adaptive_streaming_I1"
-    ipendpoint_id: "adaptive_streaming_I1_apache1"
-    influxdb_url: "http://203.0.113.100:8086"
-    database_name: "CLMCMetrics"
-  - name: ipendpoint2
-    cpus: 1
-    memory: 2048
-    disk: "10GB"
-    service_name: "ipendpoint"
-    forward_ports:
-      - guest: 80
-        host: 8082
-    ip_address: "203.0.113.102"
-    location: "DC2"
-    sfc_id: "MS_Template_1"
-    sfc_id_instance: "MS_I1"
-    sf_id: "adaptive_streaming"
-    sf_id_instance: "adaptive_streaming_I1"
-    ipendpoint_id: "adaptive_streaming_I1_apache2"
-    influxdb_url: "http://203.0.113.100:8086"
-    database_name: "CLMCMetrics"      
-  - name: test-runner
-    cpus: 1
-    memory: 2048
-    disk: "10GB"
-    ip_address: "203.0.113.102"       
\ No newline at end of file
diff --git a/test/streaming-sim/test_rspec.py b/test/streaming-sim/test_rspec.py
deleted file mode 100644
index ecce587eab36aab6873b6c10c1c3924bcee93717..0000000000000000000000000000000000000000
--- a/test/streaming-sim/test_rspec.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/python3
-
-from subprocess import run
-from platform import system
-import pytest
-
-
-@pytest.mark.parametrize("service_name", [
-    'clmc-service',
-    'ipendpoint1',
-    'ipendpoint2'
-])
-def test_service_names(streaming_sim_config, service_name):
-    """
-    Tests the service names in the configuration.
-
-    :param streaming_sim_config: the configuration fixture collected from conftest.py
-    :param service_name: the service name to test
-    """
-
-    assert any(s['name'] == service_name for s in streaming_sim_config['hosts']), "{0} not in list of hosts".format(service_name)
-    print("\nSuccessfully passed configuration test for service name {0}\n".format(service_name))
-
-
-def test_ping(streaming_sim_config):
-    """
-    Pings each service to test for liveness
-
-    :param streaming_sim_config: the configuration fixture collected from conftest.py
-    """
-
-    print("\n")  # blank line printed for formatting purposes
-
-    ping_count = 1
-    system_dependent_param = "-n" if system().lower() == "windows" else "-c"
-
-    for service in streaming_sim_config['hosts']:
-        command = ["ping", system_dependent_param, str(ping_count), service['ip_address']]
-        assert run(command).returncode == 0, "Service ping test failed for {0} with ip address {1}".format(service['name'], service['ip_address'])
-        print("\nSuccessfully passed ping test for service: {0}\n".format(service['name']))
diff --git a/test/streaming-sim/test_simresults.py b/test/streaming-sim/test_simresults.py
deleted file mode 100644
index ffdd8a949b2cbb7408fe40e3fbcbc8d2fa04d3b1..0000000000000000000000000000000000000000
--- a/test/streaming-sim/test_simresults.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/python3
-
-import pytest
-from StreamingSim import run_simulation_fixture
-
-
-class TestSimulation(object):
-    """
-    A testing class used to group all the tests related to the simulation data
-    """
-
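-    # expected counts: 7200 = 2 ipendpoints * 3600 simulated seconds (SIMULATION_TIME_SEC);
-    # for vm_res_alloc: 12 = 2 ipendpoints * 3 state transitions * 2 points per transition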
-    @pytest.mark.parametrize("query, expected_result", [
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."cpu_usage"',
-         {"time": "1970-01-01T00:00:00Z", "count_cpu_active_time": 7200, "count_cpu_idle_time": 7200, "count_cpu_usage": 7200}),
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."ipendpoint_route"',
-         {"time": "1970-01-01T00:00:00Z", "count_http_requests_fqdn_m": 7200, "count_network_fqdn_latency": 7200}),
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."mpegdash_service"',
-         {"time": "1970-01-01T00:00:00Z", "count_avg_response_time": 7200, "count_peak_response_time": 7200, "count_requests": 7200}),
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."net_port_io"',
-         {"time": "1970-01-01T00:00:00Z", "count_RX_BYTES_PORT_M": 7200, "count_TX_BYTES_PORT_M": 7200}),
-        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."vm_res_alloc"',
-         {"time": "1970-01-01T00:00:00Z", "count_cpu": 12, "count_memory": 12, "count_storage": 12})
-    ])
-    def test_simulation(self, query, expected_result, get_db_client, run_simulation_fixture):
-        """
-        This is the entry point of the test. This method will be found and executed when the module is run using pytest
-
-        :param query: the query to execute (value obtained from the pytest parameter decorator)
-        :param expected_result: the result expected from executing the query (value obtained from the pytest parameter decorator)
-        :param get_db_client: the influx db client fixture - imported from conftest.py
-        :param run_simulation_fixture: the imported fixture to use to generate the testing data - the return value of the fixture is not needed in this case
-        """
-
-        # pytest automatically goes through all queries under test, declared in the parameters decorator
-
-        print("\n")  # prints a blank line for formatting purposes
-
-        # the raise_errors=False argument is given so that we could actually test that the DB didn't return any errors instead of raising an exception
-        query_result = get_db_client.query(query, raise_errors=False)
-
-        # test the error attribute of the result is None, that is no error is returned from executing the DB query
-        assert query_result.error is None, "An error was encountered while executing query {0}.".format(query)
-
-        # get the dictionary of result points; the next() function just gets the first element of the query results iterator (we only expect one item in the iterator)
-        actual_result = next(query_result.get_points())
-
-        assert expected_result == actual_result, "Simulation test failure"
-
-        print("Successfully passed test for the following query: {0}".format(query))
diff --git a/test/streaming/__init__.py b/test/streaming/__init__.py
deleted file mode 100644
index 44f772595799f5fe338534918c95e23e08e80464..0000000000000000000000000000000000000000
--- a/test/streaming/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-#!/usr/bin/python3
\ No newline at end of file
diff --git a/test/streaming/conftest.py b/test/streaming/conftest.py
deleted file mode 100644
index 77e0f1d2d5f50a2a13d7918d24b155563a377436..0000000000000000000000000000000000000000
--- a/test/streaming/conftest.py
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/usr/bin/python3
-
-import pytest
-import yaml
-
-
-@pytest.fixture(scope="module", params=[{'config': {'rspec': '/vagrant/test/streaming/rspec.yml'}}])
-def streaming_config(request):
-    """
-    Reads the service configuration deployed for the streaming simulation test.
-
-    :param request: access the parameters of the fixture
-    :return: the python object representing the read YAML file
-    """
-
-    with open(request.param['config']['rspec'], 'r') as stream:
-        data_loaded = yaml.load(stream)
-    return data_loaded
diff --git a/test/streaming/dashboard.json b/test/streaming/dashboard.json
deleted file mode 100644
index 52e7384e0c62b6cc33f7a09253fd951d5512d2c3..0000000000000000000000000000000000000000
--- a/test/streaming/dashboard.json
+++ /dev/null
@@ -1 +0,0 @@
-{"id":1,"cells":[{"i":"396b0b14-1482-4b8a-a359-f144541170a4","x":6,"y":8,"w":6,"h":4,"name":"AdaptiveStreaming_SF_NetworkBytesSentPerSecond","queries":[{"query":"SELECT derivative(mean(\"bytes_sent\"), 1s) AS \"bytes_sent_per_second\" FROM \"CLMCMetrics\".\"autogen\".\"net\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx2' GROUP BY :interval:","queryConfig":{"database":"","measurement":"","retentionPolicy":"","fields":[],"tags":{},"groupBy":{"time":"","tags":[]},"areTagsAccepted":false,"rawText":"SELECT derivative(mean(\"bytes_sent\"), 1s) AS \"bytes_sent_per_second\" FROM \"CLMCMetrics\".\"autogen\".\"net\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx2' GROUP BY :interval:","range":null}}],"type":"line","links":{"self":"/chronograf/v1/dashboards/1/cells/396b0b14-1482-4b8a-a359-f144541170a4"}},{"i":"480b4037-a816-4e1c-8c84-edb39b0c1f6d","x":0,"y":8,"w":6,"h":4,"name":"AdapativeStreaming_SF_NetworkBytesSentPerSecond","queries":[{"query":"SELECT derivative(mean(\"bytes_sent\"), 1s) AS \"bytes_sent_per_second\" FROM \"CLMCMetrics\".\"autogen\".\"net\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx1' GROUP BY :interval:","queryConfig":{"database":"","measurement":"","retentionPolicy":"","fields":[],"tags":{},"groupBy":{"time":"","tags":[]},"areTagsAccepted":false,"rawText":"SELECT derivative(mean(\"bytes_sent\"), 1s) AS \"bytes_sent_per_second\" FROM \"CLMCMetrics\".\"autogen\".\"net\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx1' GROUP BY :interval:","range":null}}],"type":"line","links":{"self":"/chronograf/v1/dashboards/1/cells/480b4037-a816-4e1c-8c84-edb39b0c1f6d"}},{"i":"6ad170aa-c5f2-4930-a604-1e88579dffee","x":6,"y":4,"w":6,"h":4,"name":"AdaptiveStreaming_SF2_CPU","queries":[{"query":"SELECT 100-mean(\"usage_idle\") AS \"mean_usage_idle\" FROM \"CLMCMetrics\".\"autogen\".\"cpu\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx2' GROUP BY :interval:","queryConfig":{"database":"","measurement":"","retentionPolicy":"","fields":[],"tags":{},"groupBy":{"time":"","tags":[]},"areTagsAccepted":false,"rawText":"SELECT 100-mean(\"usage_idle\") AS \"mean_usage_idle\" FROM \"CLMCMetrics\".\"autogen\".\"cpu\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx2' GROUP BY :interval:","range":null}}],"type":"line","links":{"self":"/chronograf/v1/dashboards/1/cells/6ad170aa-c5f2-4930-a604-1e88579dffee"}},{"i":"7e424259-32b8-40be-aa53-477aaf801f0e","x":0,"y":4,"w":6,"h":4,"name":"AdaptiveStreaming_SF1_CPU","queries":[{"query":"SELECT 100-mean(\"usage_idle\") AS \"mean_usage_idle\" FROM \"CLMCMetrics\".\"autogen\".\"cpu\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx1' GROUP BY :interval:","queryConfig":{"database":"","measurement":"","retentionPolicy":"","fields":[],"tags":{},"groupBy":{"time":"","tags":[]},"areTagsAccepted":false,"rawText":"SELECT 100-mean(\"usage_idle\") AS \"mean_usage_idle\" FROM \"CLMCMetrics\".\"autogen\".\"cpu\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx1' GROUP BY :interval:","range":null}}],"type":"line","links":{"self":"/chronograf/v1/dashboards/1/cells/7e424259-32b8-40be-aa53-477aaf801f0e"}},{"i":"a095c820-8bac-45fe-974d-4030e1bb8770","x":6,"y":0,"w":6,"h":4,"name":"AdaptiveStreaming_SF2_ActiveConnections","queries":[{"query":"SELECT mean(\"active\") AS \"mean_active\" FROM 
\"CLMCMetrics\".\"autogen\".\"nginx\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx2' GROUP BY :interval:","label":"nginx.active","queryConfig":{"database":"CLMCMetrics","measurement":"nginx","retentionPolicy":"autogen","fields":[{"field":"active","funcs":["mean"]}],"tags":{"ipendpoint":["adaptive_streaming_I1_nginx2"]},"groupBy":{"time":"auto","tags":[]},"areTagsAccepted":true,"rawText":null,"range":null}}],"type":"line","links":{"self":"/chronograf/v1/dashboards/1/cells/a095c820-8bac-45fe-974d-4030e1bb8770"}},{"i":"63a7e85a-b411-46be-9478-8479405379a3","x":0,"y":0,"w":6,"h":4,"name":"AdaptiveStreaming_SF1_ActiveConnections","queries":[{"query":"SELECT mean(\"active\") AS \"mean_active\" FROM \"CLMCMetrics\".\"autogen\".\"nginx\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx1' GROUP BY :interval:","label":"nginx.active","queryConfig":{"database":"CLMCMetrics","measurement":"nginx","retentionPolicy":"autogen","fields":[{"field":"active","funcs":["mean"]}],"tags":{"ipendpoint":["adaptive_streaming_I1_nginx1"]},"groupBy":{"time":"auto","tags":[]},"areTagsAccepted":true,"rawText":null,"range":null}}],"type":"line","links":{"self":"/chronograf/v1/dashboards/1/cells/63a7e85a-b411-46be-9478-8479405379a3"}}],"templates":[],"name":"Adaptive Streaming Experiment Dashboard","links":{"self":"/chronograf/v1/dashboards/1","cells":"/chronograf/v1/dashboards/1/cells","templates":"/chronograf/v1/dashboards/1/templates"}}
diff --git a/test/streaming/influx.json b/test/streaming/influx.json
deleted file mode 100644
index 34bb14a56b2cad467d93d5b8451804a85ebbc707..0000000000000000000000000000000000000000
--- a/test/streaming/influx.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
-    "id": "1",
-    "name": "Influx 1",
-    "url": "http://localhost:8086",
-    "default": true,
-    "telegraf": "CLMCMetrics"
-}
\ No newline at end of file
diff --git a/test/streaming/kapacitor.conf b/test/streaming/kapacitor.conf
deleted file mode 100644
index e8332d6297a5ba109fff0a191ff5a50d9ade15fd..0000000000000000000000000000000000000000
--- a/test/streaming/kapacitor.conf
+++ /dev/null
@@ -1,699 +0,0 @@
-# The hostname of this node.
-# Must be resolvable by any configured InfluxDB hosts.
-hostname = "localhost"
-# Directory for storing a small amount of metadata about the server.
-data_dir = "/var/lib/kapacitor"
-
-# Do not apply configuration overrides during startup.
-# Useful if the configuration overrides cause Kapacitor to fail startup.
-# This option is intended as a safe guard and should not be needed in practice.
-skip-config-overrides = false
-
-# Default retention-policy, if a write is made to Kapacitor and
-# it does not have a retention policy associated with it,
-# then the retention policy will be set to this value
-default-retention-policy = ""
-
-[http]
-  # HTTP API Server for Kapacitor
-  # This server is always on,
-  # it serves both as a write endpoint
-  # and as the API endpoint for all other
-  # Kapacitor calls.
-  bind-address = ":9092"
-  log-enabled = true
-  write-tracing = false
-  pprof-enabled = false
-  https-enabled = false
-  https-certificate = "/etc/ssl/kapacitor.pem"
-
-[config-override]
-  # Enable/Disable the service for overriding configuration via the HTTP API.
-  enabled = true
-
-[logging]
-    # Destination for logs
-    # Can be a path to a file or 'STDOUT', 'STDERR'.
-    file = "/var/log/kapacitor/kapacitor.log"
-    # Logging level can be one of:
-    # DEBUG, INFO, ERROR
-    # HTTP logging can be disabled in the [http] config section.
-    level = "INFO"
-
-[load]
-  # Enable/Disable the service for loading tasks/templates/handlers
-  # from a directory
-  enabled = true
-  # Directory where task/template/handler files are set
-  dir = "/etc/kapacitor/load"
-
-
-[replay]
-  # Where to store replay files, aka recordings.
-  dir = "/var/lib/kapacitor/replay"
-
-[task]
-  # Where to store the tasks database
-  # DEPRECATED: This option is not needed for new installations.
-  # It is only used to determine the location of the task.db file
-  # for migrating to the new `storage` service.
-  dir = "/var/lib/kapacitor/tasks"
-  # How often to snapshot running task state.
-  snapshot-interval = "60s"
-
-[storage]
-  # Where to store the Kapacitor boltdb database
-  boltdb = "/var/lib/kapacitor/kapacitor.db"
-
-[deadman]
-  # Configure a deadman's switch
-  # Globally configure deadman's switches on all tasks.
-  # NOTE: for this to be of use you must also globally configure at least one alerting method.
-  global = false
-  # Threshold, if globally configured the alert will be triggered if the throughput in points/interval is <= threshold.
-  threshold = 0.0
-  # Interval, if globally configured the frequency at which to check the throughput.
-  interval = "10s"
-  # Id -- the alert Id, NODE_NAME will be replaced with the name of the node being monitored.
-  id = "node 'NODE_NAME' in task '{{ .TaskName }}'"
-  # The message of the alert. INTERVAL will be replaced by the interval.
-  message = "{{ .ID }} is {{ if eq .Level \"OK\" }}alive{{ else }}dead{{ end }}: {{ index .Fields \"collected\" | printf \"%0.3f\" }} points/INTERVAL."
-
-
-# Multiple InfluxDB configurations can be defined.
-# Exactly one must be marked as the default.
-# Each one will be given a name and can be referenced in batch queries and InfluxDBOut nodes.
-[[influxdb]]
-  # Connect to an InfluxDB cluster
-  # Kapacitor can subscribe, query and write to this cluster.
-  # Using InfluxDB is not required and can be disabled.
-  enabled = true
-  default = true
-  name = "localhost"
-  urls = ["http://localhost:8086"]
-  username = ""
-  password = ""
-  timeout = 0
-  # Absolute path to pem encoded CA file.
-  # A CA can be provided without a key/cert pair
-  #   ssl-ca = "/etc/kapacitor/ca.pem"
-  # Absolutes paths to pem encoded key and cert files.
-  #   ssl-cert = "/etc/kapacitor/cert.pem"
-  #   ssl-key = "/etc/kapacitor/key.pem"
-
-  # Do not verify the TLS/SSL certificate.
-  # This is insecure.
-  insecure-skip-verify = false
-
-  # Maximum time to try and connect to InfluxDB during startup
-  startup-timeout = "5m"
-
-  # Turn off all subscriptions
-  disable-subscriptions = false
-
-  # Subscription mode is either "cluster" or "server"
-  subscription-mode = "cluster"
-
-  # Which protocol to use for subscriptions
-  # one of 'udp', 'http', or 'https'.
-  subscription-protocol = "http"
-
-  # Subscriptions resync time interval
-  # Useful if you want to subscribe to newly created databases
-  # without restarting Kapacitor
-  subscriptions-sync-interval = "1m0s"
-
-  # Override the global hostname option for this InfluxDB cluster.
-  # Useful if the InfluxDB cluster is in a separate network and
-  # needs special config to connect back to this Kapacitor instance.
-  # Defaults to `hostname` if empty.
-  kapacitor-hostname = ""
-
-  # Override the global http port option for this InfluxDB cluster.
-  # Useful if the InfluxDB cluster is in a separate network and
-  # needs special config to connect back to this Kapacitor instance.
-  # Defaults to the port from `[http] bind-address` if 0.
-  http-port = 0
-
-  # Host part of a bind address for UDP listeners.
-  # For example if a UDP listener is using port 1234
-  # and `udp-bind = "hostname_or_ip"`,
-  # then the UDP port will be bound to `hostname_or_ip:1234`
-  # The default empty value will bind to all addresses.
-  udp-bind = ""
-  # Subscriptions use the UDP network protocol.
-  # The following options are for the created UDP listeners for each subscription.
-  # Number of packets to buffer when reading packets off the socket.
-  udp-buffer = 1000
-  # The size in bytes of the OS read buffer for the UDP socket.
-  # A value of 0 indicates use the OS default.
-  udp-read-buffer = 0
-
-  [influxdb.subscriptions]
-    # Set of databases and retention policies to subscribe to.
-    # If empty will subscribe to all, minus the list in
-    # influxdb.excluded-subscriptions
-    #
-    # Format
-    # db_name = <list of retention policies>
-    #
-    # Example:
-    # my_database = [ "default", "longterm" ]
-  [influxdb.excluded-subscriptions]
-    # Set of databases and retention policies to exclude from the subscriptions.
-    # If influxdb.subscriptions is empty it will subscribe to all
-    # except databases listed here.
-    #
-    # Format
-    # db_name = <list of retention policies>
-    #
-    # Example:
-    # my_database = [ "default", "longterm" ]
-
-[kubernetes]
-  # Enable/Disable the kubernetes service.
-  # Needed by the k8sAutoscale TICKscript node.
-  enabled = false
-  # There are several ways to connect to the kubernetes API servers:
-  #
-  # Via the proxy, start the proxy via the `kubectl proxy` command:
-  #   api-servers = ["http://localhost:8001"]
-  #
-  # From within the cluster itself, in which case
-  # kubernetes secrets and DNS services are used
-  # to determine the needed configuration.
-  #   in-cluster = true
-  #
-  # Direct connection, in which case you need to know
-  # the URL of the API servers,  the authentication token and
-  # the path to the ca cert bundle.
-  # These values can be found using the `kubectl config view` command.
-  #   api-servers = ["http://192.168.99.100:8443"]
-  #   token = "..."
-  #   ca-path = "/path/to/kubernetes/ca.crt"
-  #
-  # Kubernetes can also serve as a discoverer for scrape targets.
-  # In that case the type of resources to discover must be specified.
-  # Valid values are: "node", "pod", "service", and "endpoint".
-  #   resource = "pod"
-
-
-
-[smtp]
-  # Configure an SMTP email server
-  # Will use TLS and authentication if possible
-  # Only necessary for sending emails from alerts.
-  enabled = false
-  host = "localhost"
-  port = 25
-  username = ""
-  password = ""
-  # From address for outgoing mail
-  from = ""
-  # List of default To addresses.
-  # to = ["oncall@example.com"]
-
-  # Skip TLS certificate verify when connecting to SMTP server
-  no-verify = false
-  # Close idle connections after timeout
-  idle-timeout = "30s"
-
-  # If true then all alerts will be sent via Email
-  # without explicitly marking them in the TICKscript.
-  global = false
-  # Only applies if global is true.
-  # Sets all alerts in state-changes-only mode,
-  # meaning alerts will only be sent if the alert state changes.
-  state-changes-only = false
-
-[snmptrap]
-  # Configure an SNMP trap server
-  enabled = false
-  # The host:port address of the SNMP trap server
-  addr = "localhost:162"
-  # The community to use for traps
-  community = "kapacitor"
-  # Number of retries when sending traps
-  retries = 1
-
-
-[opsgenie]
-    # Configure OpsGenie with your API key and default routing key.
-    enabled = false
-    # Your OpsGenie API Key.
-    api-key = ""
-    # Default OpsGenie teams, can be overridden per alert.
-    # teams = ["team1", "team2"]
-    # Default OpsGenie recipients, can be overridden per alert.
-    # recipients = ["recipient1", "recipient2"]
-    # The OpsGenie API URL should not need to be changed.
-    url = "https://api.opsgenie.com/v1/json/alert"
-    # The OpsGenie Recovery URL, you can change this
-    # based on which behavior you want a recovery to
-    # trigger (Add Notes, Close Alert, etc.)
-    recovery_url = "https://api.opsgenie.com/v1/json/alert/note"
-    # If true then all alerts will be sent to OpsGenie
-    # without explicitly marking them in the TICKscript.
-    # The team and recipients can still be overridden.
-    global = false
-
-[victorops]
-  # Configure VictorOps with your API key and default routing key.
-  enabled = false
-  # Your VictorOps API Key.
-  api-key = ""
-  # Default VictorOps routing key, can be overridden per alert.
-  routing-key = ""
-  # The VictorOps API URL should not need to be changed.
-  url = "https://alert.victorops.com/integrations/generic/20131114/alert"
-  # If true then all alerts will be sent to VictorOps
-  # without explicitly marking them in the TICKscript.
-  # The routing key can still be overridden.
-  global = false
-  # Use JSON for the "data" field
-  # New installations will want to set this to true as it makes
-  # the data that triggered the alert available within VictorOps.
-  # The default is "false" for backwards compatibility reasons.
-  # json-data = false
-
-[pagerduty]
-  # Configure PagerDuty.
-  enabled = false
-  # Your PagerDuty Service Key.
-  service-key = ""
-  # The PagerDuty API URL should not need to be changed.
-  url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
-  # If true then all alerts will be sent to PagerDuty
-  # without explicitly marking them in the TICKscript.
-  global = false
-
-[pushover]
-  # Configure Pushover.
-  enabled = false
-  # Your Pushover API token.
-  token = ""
-  # Your Pushover USER_TOKEN.
-  user-key = ""
-  # The URL for the Pushover API.
-  url = "https://api.pushover.net/1/messages.json"
-
-##########################################
-# Configure Alert POST request Endpoints
-
-# As ENV variables:
-# KAPACITOR_HTTPPOST_0_ENDPOINT = "example"
-# KAPACITOR_HTTPPOST_0_URL = "http://example.com"
-# KAPACITOR_HTTPPOST_0_HEADERS_Example = "header"
-
-# [[httppost]]
-#   endpoint = "example"
-#   url = "http://example.com"
-#   headers = { Example = "your-key" }
-#   basic-auth = { username = "my-user", password = "my-pass" }
-#
-#   # Provide an alert template for constructing a custom HTTP body.
-#   # Alert templates are only used with post alert handlers as they consume alert data.
-#   # The template uses https://golang.org/pkg/text/template/ and has access to the following fields:
-#   #    * .ID - The unique ID for this alert
-#   #    * .Message - The message of the alert
-#   #    * .Details - The details of the alert
-#   #    * .Time - The time the alert event occurred
-#   #    * .Duration - The duration of the alert event.
-#   #    * .Level - The level of the alert, i.e INFO, WARN, or CRITICAL.
-#   #    * .Data - The data that triggered the alert.
-#   #
-#   # Specify the template inline.
-#   alert-template = "{{.Message}}:{{range .Data.Series}}{{.Tags}},{{range .Values}}{{.}}{{end}}{{end}}"
-#   # Specify an absolute path to a template file.
-#   alert-template-file = "/path/to/template/file"
-#
-#   # Provide a row template for constructing a custom HTTP body.
-#   # Row templates are only used with httpPost pipeline nodes as they consume a row at a time.
-#   # The template uses https://golang.org/pkg/text/template/ and has access to the following fields:
-#   #    * .Name - The measurement name of the data stream
-#   #    * .Tags - A map of tags on the data.
-#   #    * .Values - A list of values, each entry is a map containing a "time" key for the time of the point
-#   #       and keys for all other fields on the point.
-#   #
-#   # Specify the template inline.
-#   row-template = "{{.Name}} host={{index .Tags \"host\"}}{{range .Values}} {{index . "time"}} {{index . "value"}}{{end}}"
-#   # Specify an absolute path to a template file.
-#   row-template-file = "/path/to/template/file"
-
-[slack]
-  # Configure Slack.
-  enabled = true
-  # The Slack webhook URL, can be obtained by adding
-  # an Incoming Webhook integration.
-  # Visit https://slack.com/services/new/incoming-webhook
-  # to add new webhook for Kapacitor.
-  url = "https://hooks.slack.com/services/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"
-  # Default channel for messages
-  channel = "#clmc"
-  # If true all the alerts will be sent to Slack
-  # without explicitly marking them in the TICKscript.
-  global = false
-  # Only applies if global is true.
-  # Sets all alerts in state-changes-only mode,
-  # meaning alerts will only be sent if the alert state changes.
-  state-changes-only = false
-
-[telegram]
-  # Configure Telegram.
-  enabled = false
-  # The Telegram Bot URL should not need to be changed.
-  url = "https://api.telegram.org/bot"
-  # Telegram Bot Token, can be obtained From @BotFather.
-  token = ""
-  # Default recipient for messages, Contact @myidbot on Telegram to get an ID.
-  chat-id = ""
-  # Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your alert message.
-  #parse-mode  = "Markdown"
-  # Disable link previews for links in this message
-  disable-web-page-preview = false
-  # Sends the message silently. iOS users will not receive a notification, Android users will receive a notification with no sound.
-  disable-notification = false
-  # If true then all alerts will be sent to Telegram
-  # without explicitly marking them in the TICKscript.
-  global = false
-  # Only applies if global is true.
-  # Sets all alerts in state-changes-only mode,
-  # meaning alerts will only be sent if the alert state changes.
-  state-changes-only = false
-
-[hipchat]
-  # Configure HipChat.
-  enabled = false
-  # The HipChat API URL. Replace subdomain with your
-  # HipChat subdomain.
-  # url = "https://subdomain.hipchat.com/v2/room"
-  # Visit https://www.hipchat.com/docs/apiv2
-  # for information on obtaining your room id and
-  # authentication token.
-  # Default room for messages
-  room = ""
-  # Default authentication token
-  token = ""
-  # If true then all alerts will be sent to HipChat
-  # without explicitly marking them in the TICKscript.
-  global = false
-  # Only applies if global is true.
-  # Sets all alerts in state-changes-only mode,
-  # meaning alerts will only be sent if the alert state changes.
-  state-changes-only = false
-
-[alerta]
-  # Configure Alerta.
-  enabled = false
-  # The Alerta URL.
-  url = ""
-  # Default authentication token.
-  token = ""
-  # Default token prefix
-  # If you are on older versions of alerta you may need to change this to "Key"
-  token-prefix = "Bearer"
-  # Default environment.
-  environment = ""
-  # Default origin.
-  origin = "kapacitor"
-
-[sensu]
-  # Configure Sensu.
-  enabled = false
-  # The Sensu Client host:port address.
-  addr = "sensu-client:3030"
-  # Default JIT source.
-  source = "Kapacitor"
-
-[reporting]
-  # Send usage statistics
-  # every 12 hours to Enterprise.
-  enabled = true
-  url = "https://usage.influxdata.com"
-
-[stats]
-  # Emit internal statistics about Kapacitor.
-  # To consume these stats create a stream task
-  # that selects data from the configured database
-  # and retention policy.
-  #
-  # Example:
-  #  stream|from().database('_kapacitor').retentionPolicy('autogen')...
-  #
-  enabled = true
-  stats-interval = "10s"
-  database = "_kapacitor"
-  retention-policy= "autogen"
-
-[udf]
-# Configuration for UDFs (User Defined Functions)
-[udf.functions]
-    # Example go UDF.
-    # First compile example:
-    #   go build -o avg_udf ./udf/agent/examples/moving_avg.go
-    #
-    # Use in TICKscript like:
-    #   stream.goavg()
-    #           .field('value')
-    #           .size(10)
-    #           .as('m_average')
-    #
-    # uncomment to enable
-    #[udf.functions.goavg]
-    #   prog = "./avg_udf"
-    #   args = []
-    #   timeout = "10s"
-
-    # Example python UDF.
-    # Use in TICKscript like:
-    #   stream.pyavg()
-    #           .field('value')
-    #           .size(10)
-    #           .as('m_average')
-    #
-    # uncomment to enable
-    #[udf.functions.pyavg]
-    #   prog = "/usr/bin/python2"
-    #   args = ["-u", "./udf/agent/examples/moving_avg.py"]
-    #   timeout = "10s"
-    #   [udf.functions.pyavg.env]
-    #       PYTHONPATH = "./udf/agent/py"
-
-    # Example UDF over a socket
-    #[udf.functions.myCustomUDF]
-    #   socket = "/path/to/socket"
-    #   timeout = "10s"
-
-[talk]
-  # Configure Talk.
-  enabled = false
-  # The Talk webhook URL.
-  url = "https://jianliao.com/v2/services/webhook/uuid"
-  # The default authorName.
-  author_name = "Kapacitor"
-
-# MQTT client configuration.
-#  Multiple different clients may be configured by
-#  repeating [[mqtt]] sections.
-[[mqtt]]
-  enabled = false
-  # Unique name for this broker configuration
-  name = "localhost"
-  # Whether this broker configuration is the default
-  default = true
-  # URL of the MQTT broker.
-  # Possible protocols include:
-  #  tcp - Raw TCP network connection
-  #  ssl - TLS protected TCP network connection
-  #  ws  - Websocket network connection
-  url = "tcp://localhost:1883"
-
-  # TLS/SSL configuration
-  # A CA can be provided without a key/cert pair
-  #   ssl-ca = "/etc/kapacitor/ca.pem"
-  # Absolutes paths to pem encoded key and cert files.
-  #   ssl-cert = "/etc/kapacitor/cert.pem"
-  #   ssl-key = "/etc/kapacitor/key.pem"
-
-  # Unique ID for this MQTT client.
-  # If empty used the value of "name"
-  client-id = ""
-
-  # Username
-  username = ""
-  # Password
-  password = ""
-
-[[swarm]]
-  # Enable/Disable the Docker Swarm service.
-  # Needed by the swarmAutoscale TICKscript node.
-  enabled = false
-  # Unique ID for this Swarm cluster
-  # NOTE: This is not the ID generated by Swarm, but rather a user-defined
-  # ID for this cluster since Kapacitor can communicate with multiple clusters.
-  id = ""
-  # List of URLs for Docker Swarm servers.
-  servers = ["http://localhost:2376"]
-  # TLS/SSL Configuration for connecting to secured Docker daemons
-  ssl-ca = ""
-  ssl-cert = ""
-  ssl-key = ""
-  insecure-skip-verify = false
-
-##################################
-# Input Methods, same as InfluxDB
-#
-
-[collectd]
-  enabled = false
-  bind-address = ":25826"
-  database = "collectd"
-  retention-policy = ""
-  batch-size = 1000
-  batch-pending = 5
-  batch-timeout = "10s"
-  typesdb = "/usr/share/collectd/types.db"
-
-[opentsdb]
-  enabled = false
-  bind-address = ":4242"
-  database = "opentsdb"
-  retention-policy = ""
-  consistency-level = "one"
-  tls-enabled = false
-  certificate = "/etc/ssl/influxdb.pem"
-  batch-size = 1000
-  batch-pending = 5
-  batch-timeout = "1s"
-
-# Service Discovery and metric scraping
-
-[[scraper]]
-  enabled = false
-  name = "myscraper"
-  # Specify the id of a discoverer service specified below
-  discoverer-id = ""
-  # Specify the type of discoverer service being used.
-  discoverer-service = ""
-  db = "prometheus_raw"
-  rp = "autogen"
-  type = "prometheus"
-  scheme = "http"
-  metrics-path = "/metrics"
-  scrape-interval = "1m0s"
-  scrape-timeout = "10s"
-  username = ""
-  password = ""
-  bearer-token = ""
-  ssl-ca = ""
-  ssl-cert = ""
-  ssl-key = ""
-  ssl-server-name = ""
-  insecure-skip-verify = false
-
-# Supported discovery services
-
-[[azure]]
-  enabled = false
-  id = "myazure"
-  port = 80
-  subscription-id = ""
-  tenant-id = ""
-  client-id = ""
-  client-secret = ""
-  refresh-interval = "5m0s"
-
-[[consul]]
-  enabled = false
-  id = "myconsul"
-  address = "127.0.0.1:8500"
-  token = ""
-  datacenter = ""
-  tag-separator = ","
-  scheme = "http"
-  username = ""
-  password = ""
-  ssl-ca = ""
-  ssl-cert = ""
-  ssl-key = ""
-  ssl-server-name = ""
-  insecure-skip-verify = false
-
-[[dns]]
-  enabled = false
-  id = "mydns"
-  refresh-interval = "30s"
-  ## Type can be SRV, A, or AAAA
-  type = "SRV"
-  ## Port is the port to scrape for records returned by A or AAAA types
-  port = 80
-
-[[ec2]]
-  enabled = false
-  id = "myec2"
-  region = "us-east-1"
-  access-key = ""
-  secret-key = ""
-  profile = ""
-  refresh-interval = "1m0s"
-  port = 80
-
-[[file-discovery]]
-  enabled = false
-  id = "myfile"
-  refresh-interval = "5m0s"
-  files = []
-
-[[gce]]
-  enabled = false
-  id = "mygce"
-  project = ""
-  zone = ""
-  filter = ""
-  refresh-interval = "1m0s"
-  port = 80
-  tag-separator = ","
-
-[[marathon]]
-  enabled = false
-  id = "mymarathon"
-  timeout = "30s"
-  refresh-interval = "30s"
-  bearer-token = ""
-  ssl-ca = ""
-  ssl-cert = ""
-  ssl-key = ""
-  ssl-server-name = ""
-  insecure-skip-verify = false
-
-[[nerve]]
-  enabled = false
-  id = "mynerve"
-  timeout = "10s"
-
-[[serverset]]
-  enabled = false
-  id = "myserverset"
-  timeout = "10s"
-
-[[static-discovery]]
-  enabled = false
-  id = "mystatic"
-  targets = ["localhost:9100"]
-  [static.labels]
-    region = "us-east-1"
-
-[[triton]]
-  enabled = false
-  id = "mytriton"
-  account = ""
-  dns-suffix = ""
-  endpoint = ""
-  port = 9163
-  refresh-interval = "1m0s"
-  version = 1
-  ssl-ca = ""
-  ssl-cert = ""
-  ssl-key = ""
-  ssl-server-name = ""
-  insecure-skip-verify = false
diff --git a/test/streaming/kapacitor.json b/test/streaming/kapacitor.json
deleted file mode 100644
index 60118860fc3351e456f1cccfe6145a169f50734f..0000000000000000000000000000000000000000
--- a/test/streaming/kapacitor.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-    "id": "1",
-    "name": "CLMCKapacitor",
-    "url": "http://localhost:9092",
-    "active": false
-}
\ No newline at end of file
diff --git a/test/streaming/manual.md b/test/streaming/manual.md
deleted file mode 100644
index 7db0fc7126408bbd70ad54775b03e90b67867b83..0000000000000000000000000000000000000000
--- a/test/streaming/manual.md
+++ /dev/null
@@ -1,146 +0,0 @@
-<!--
-// © University of Southampton IT Innovation Centre, 2017
-//
-// Copyright in this software belongs to University of Southampton
-// IT Innovation Centre of Gamma House, Enterprise Road, 
-// Chilworth Science Park, Southampton, SO16 7NS, UK.
-//
-// This software may not be used, sold, licensed, transferred, copied
-// or reproduced in whole or in part in any manner or form or in or
-// on any media by any person other than in accordance with the terms
-// of the Licence Agreement supplied with the software, or otherwise
-// without the prior written consent of the copyright owners.
-//
-// This software is distributed WITHOUT ANY WARRANTY, without even the
-// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-// PURPOSE, except where stated in the Licence Agreement supplied with
-// the software.
-//
-//      Created By :            Michael Boniface
-//      Updated By :            Simon Crowle
-//      Created Date :          18-12-2017
-//      Update Date :           14-02-2018
-//      Created for Project :   FLAME
--->
-
-# CLMC Adaptive Streaming Test
-
-This test streams mpeg-dash video using the two nginx servers monitored by Telegraf, configured with the default nginx plugin and a net_response plugin. The data is stored in the `clmc-service` using database `CLMCMetrics` and measurements `nginx` and `net_response`.
-
-The following command brings up the services
-
-`vagrant --fixture=streaming up`
-
-* clmc-service: configured with influx, kapacitor, chronograf
-* nginx1@DC1, nginx2@DC2: configured with nginx and a test video located at http://192.168.50.11:80/test_video/stream.mpd on the internal vbox network and at http://localhost:8081/test_video/stream.mpd if accessing from the host machine
-
-### Run the test set-up
-
-`vagrant --fixture=streaming ssh clmc-service -- "sudo /vagrant/test/streaming/setupCLMC.sh /vagrant/test/streaming"`  
-`vagrant --fixture=streaming ssh nginx1 -- "sudo /vagrant/test/streaming/setupNGINX.sh"`
-
-### Run the automated test
-
-Run the load test using the following command (here, the last parameter '15' refers to the number of VLC player clients to be launched):
-
-`vagrant --fixture=streaming ssh loadtest-streaming -- "sudo /vagrant/test/streaming/run.sh /home/ubuntu/test/streaming http://192.168.50.11/test_video/stream.mpd 15"`
-
-This test currently just generates load and does not make any assertions. It is known to break at around 1000 clients.
-
-And then point your browser to the Chronograf dashboard:
-
-`http://localhost:8888`
-
-### Run the automated PyTests
-
-SSH into the clmc-service VM:
-
-`vagrant --fixture=streaming ssh clmc-service`
-
-Run the automated tests written in pytest:
-
-`pytest -s /vagrant/test/streaming/` 
-
-### Manual test 
-
-#### Manual set-up of Chronograf's CLMC data source
-
-If you __do not__ want to run the automatic set-up, basic entry to the Chronograf dashboard is as follows:
-
-1. Point your browser to: [http://localhost:8888](http://localhost:8888)
-2. Enter your connection string: `http://localhost:8086`
-3. Enter the Name: `Influx 1`
-4. Enter the Telegraf database: `CLMCMetrics`
-
-#### Manual test on Windows
-
-##### View the video
-Install the VLC video client on the host machine. You must use a very recent version, otherwise the MPD file cannot be read. At the time of writing, the following nightly build was installed:
-
-https://nightlies.videolan.org/build/win32/vlc-3.0.0-rc1-20171201-0326/vlc-3.0.0-20171201-0326-rc1-win32.exe
-
-Start the VLC Player
-
-`Media->Open Network Stream`
-
-The test video is the FLAME project video and it can be viewed at the following location.
-
-`Enter the network URL: http://localhost:8081/test_video/stream.mpd for nginx1 server`
-
-The video should play.
-
-##### Query the data
-
-Open Chronograf by entering the following URL into a browser on the host: http://localhost:8888. Your CLMC data source, Kapacitor and demonstration dashboard should be ready for you to explore.
-
-Open the Data Explorer from the menu, select the nginx measurement and create a query such as:
-
-`SELECT mean("requests") AS "mean_requests" FROM "CLMCMetrics"."autogen"."nginx" WHERE time > now() - 1h GROUP BY time(10s)`
-
-### KPI triggers
-
-In this demonstrator an example KPI rule has been set up in Kapacitor which fires when the mean number of active connections over a 5-second window on the Nginx 1 or Nginx 2 server goes above a certain threshold (a 'warning' at 10 connections per 5-second window). The TICKscript specification for this rule is as follows:
-
-```
-dbrp "CLMCMetrics"."autogen"
-
-// Nginx 1 rule
-// -------------
-var n1Data = batch
-    |query(''' SELECT mean("active") AS "mean_active" FROM "CLMCMetrics"."autogen"."nginx" WHERE "ipendpoint"='adaptive_streaming_I1_nginx1' ''')
-        .period(5s)
-        .every(5s)
-
-var n1Alert = n1Data
-    |alert()
-        .id('{{ .Name }}/adaptive_streaming_I1_nginx1')
-        .message('{{ .ID }} is {{ .Level }} Mean active connections: {{ index .Fields "mean_active" }}')
-        .warn(lambda: "mean_active" > 10)
-        .slack()
-        .log( '/tmp/RPSLoad.log' )
-
-// Nginx 2 rule
-// -------------
-var n2Data = batch
-    |query(''' SELECT mean("active") AS "mean_active" FROM "CLMCMetrics"."autogen"."nginx" WHERE "ipendpoint"='adaptive_streaming_I1_nginx2' ''')
-        .period(5s)
-        .every(5s)
-
-var n2Alert = n2Data
-    |alert()
-        .id('{{ .Name }}/adaptive_streaming_I1_nginx2')
-        .message('{{ .ID }} is {{ .Level }} Mean active connections: {{ index .Fields "mean_active" }}')
-        .warn(lambda: "mean_active" > 10)
-        .slack()
-        .log( '/tmp/RPSLoad.log' )
-```
-
-Alerts are sent both to an internal log file on the CLMC service file system and to a FLAME demo Slack service:
-
-https://flamedemo-itinnov.slack.com
-
-Alerts can be found under the '#clmc' channel. 
-
-#### Kapacitor rules in Chronograf's GUI
-
-Additional rules can be added to this demonstrator either via the Chronograf GUI (see [here](https://docs.influxdata.com/chronograf/v1.4/introduction/getting-started/#4-connect-chronograf-to-kapacitor) for more information) or by using the Kapacitor HTTP API and TICKscript (for an introduction, [look here](https://docs.influxdata.com/kapacitor/v1.4/tick/)).
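-
-For example, assuming Kapacitor is listening on its default port (9092), a rule defined in a JSON file such as the `rules.json` in this directory can be registered as a task via the HTTP API (illustrative command):
-
-```
-curl -H "Content-Type: application/json" -X POST -d @rules.json http://localhost:9092/kapacitor/v1/tasks
-```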
diff --git a/test/streaming/report.sh b/test/streaming/report.sh
deleted file mode 100644
index ad1251a7cb908a97b4bb97834fc39c455cabbc5d..0000000000000000000000000000000000000000
--- a/test/streaming/report.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-# This script reads stdin and expects the output of cvlc.
-# It is used by the run.sh script and receives the output of the cvlc client.
-# It counts the number of times the frame "dropping" error is seen and, for every 10 occurrences, sends a message to telegraf reporting "another 10" errors.
-
-if [ "$#" -ne 1 ]; then
-    echo "Error: illegal number of arguments: "$#
-    echo "Usage: report.sh <client number>"
-    exit 
-fi
-
-COUNTER=$1
-TELEGRAF=http://localhost:8186
-
-ERR_COUNT=0
-while read line; do
-  if [[ $line = *"dropping"* ]]; then
-    ERR_COUNT=$(($ERR_COUNT + 1))
-    # report to telegraf only once another 10 errors have accumulated
-    if [ $((ERR_COUNT % 10)) -eq 0 ]; then
-      curl -i -XPOST "${TELEGRAF}/write?precision=s" --data-binary "vlc,client=${COUNTER} drop_error=10 $(date +%s)" >& /dev/null
-    fi
-  fi
-done
\ No newline at end of file
diff --git a/test/streaming/rspec.yml b/test/streaming/rspec.yml
deleted file mode 100644
index b1291a381c3ca2eb09078958dec0e0f29706a7a3..0000000000000000000000000000000000000000
--- a/test/streaming/rspec.yml
+++ /dev/null
@@ -1,64 +0,0 @@
-hosts:
-  - name: clmc-service
-    cpus: 1
-    memory: 2048
-    disk: "10GB"
-    forward_ports:
-      - guest: 8086
-        host: 8086
-      - guest: 8888
-        host: 8888
-      - guest: 9092
-        host: 9092
-    ip_address: "192.168.50.10"
-  - name: nginx1
-    cpus: 1
-    memory: 2048
-    disk: "10GB"
-    service_name: "nginx"
-    forward_ports:
-      - guest: 80
-        host: 8081
-    ip_address: "192.168.50.11"
-    location: "DC1"
-    sfc_id: "MS_Template_1"
-    sfc_id_instance: "MS_I1"
-    sf_id: "adaptive_streaming"
-    sf_id_instance: "adaptive_streaming_I1"
-    ipendpoint_id: "adaptive_streaming_I1_nginx1"
-    influxdb_url: "http://192.168.50.10:8086"
-    database_name: "CLMCMetrics"
-  - name: nginx2
-    cpus: 1
-    memory: 2048
-    disk: "10GB"
-    service_name: "nginx"
-    forward_ports:
-      - guest: 80
-        host: 8082
-    ip_address: "192.168.50.12"
-    location: "DC2"
-    sfc_id: "MS_Template_1"
-    sfc_id_instance: "MS_I1"
-    sf_id: "adaptive_streaming"
-    sf_id_instance: "adaptive_streaming_I1"
-    ipendpoint_id: "adaptive_streaming_I1_nginx2"
-    influxdb_url: "http://192.168.50.10:8086"
-    database_name: "CLMCMetrics"  
-  - name: loadtest-streaming
-    cpus: 2
-    memory: 4096
-    disk: "10GB"
-    service_name: "loadtest-streaming"
-    forward_ports:
-      - guest: 80
-        host: 8083
-    ip_address: "192.168.50.13"
-    location: "DC1"
-    sfc_id: "MS_Template_1"
-    sfc_id_instance: "MS_I1"
-    sf_id: "adaptive_streaming_client"
-    sf_id_instance: "adaptive_streaming_I1"
-    ipendpoint_id: "adaptive_streaming_I1_client1"
-    influxdb_url: "http://192.168.50.10:8086"
-    database_name: "CLMCMetrics"    
diff --git a/test/streaming/rules.json b/test/streaming/rules.json
deleted file mode 100644
index faad48b28216716cc63622b59106fad9d7130cbb..0000000000000000000000000000000000000000
--- a/test/streaming/rules.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
-    "id" : "Request_Rate_Alert_NGINXServers",
-    "type" : "batch",
-    "dbrps" : [{"db": "CLMCMetrics", "rp" : "autogen"}],
-    
-    "script" : "\/\/ NGINX 1 Rule\r\n\/\/ -------------\r\nvar n1Data = batch\r\n    |query(''' SELECT mean(\"active\") AS \"mean_active\" FROM \"CLMCMetrics\".\"autogen\".\"nginx\" WHERE \"ipendpoint\"='adaptive_streaming_I1_nginx1' ''')\r\n        .period(5s)\r\n        .every(5s)\r\n\r\nvar n1Alert = n1Data\r\n    |alert()\r\n        .id('{{ .Name }}\/adaptive_streaming_I1_nginx1')\r\n        .message('{{ .ID }} is {{ .Level }} Mean active connections: {{ index .Fields \"mean_active\" }}')\r\n        .warn(lambda: \"mean_active\" > 10)\r\n        .slack()\r\n        .log( '\/tmp\/RPSLoad.log' )\r\n\r\n\/\/ NGINX 2 Rule\r\n\/\/ -------------\r\nvar n2Data = batch\r\n    |query(''' SELECT mean(\"active\") AS \"mean_active\" FROM \"CLMCMetrics\".\"autogen\".\"nginx\" WHERE \"ipendpoint\"='adaptive_streaming_I1_nginx2' ''')\r\n        .period(5s)\r\n        .every(5s)\r\n\r\nvar n2Alert = n2Data\r\n    |alert()\r\n        .id('{{ .Name }}\/adaptive_streaming_I1_nginx2')\r\n        .message('{{ .ID }} is {{ .Level }} Mean active connections: {{ index .Fields \"mean_active\" }}')\r\n        .warn(lambda: \"mean_active\" > 10)\r\n        .slack()\r\n        .log( '\/tmp\/RPSLoad.log' )",
-    
-    "status" : "enabled"
-}
\ No newline at end of file
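The "script" member above is the two TICKscript rules serialised into a single JSON string. To inspect it without the `\r\n` and `\/` escapes, the file can be round-tripped through the standard json module:

```python
import json

with open("rules.json") as f:
    task = json.load(f)

# prints the TICKscript with all JSON escapes resolved
print(task["script"])
```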
diff --git a/test/streaming/run.sh b/test/streaming/run.sh
deleted file mode 100755
index 81c7d5f6aba81658bf416e8decd9d62d7b96f6a1..0000000000000000000000000000000000000000
--- a/test/streaming/run.sh
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/bin/bash
-#/////////////////////////////////////////////////////////////////////////
-#//
-#// (c) University of Southampton IT Innovation Centre, 2017
-#//
-#// Copyright in this software belongs to University of Southampton
-#// IT Innovation Centre of Gamma House, Enterprise Road,
-#// Chilworth Science Park, Southampton, SO16 7NS, UK.
-#//
-#// This software may not be used, sold, licensed, transferred, copied
-#// or reproduced in whole or in part in any manner or form or in or
-#// on any media by any person other than in accordance with the terms
-#// of the Licence Agreement supplied with the software, or otherwise
-#// without the prior written consent of the copyright owners.
-#//
-#// This software is distributed WITHOUT ANY WARRANTY, without even the
-#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-#// PURPOSE, except where stated in the Licence Agreement supplied with
-#// the software.
-#//
-#//      Created By :            Michael Boniface
-#//      Created Date :          15/02/2017
-#//      Created for Project :   FLAME
-#//
-#/////////////////////////////////////////////////////////////////////////
-
-if [ "$#" -ne 3 ]; then
-    echo "Error: illegal number of arguments: "$#
-      echo "Usage: run.sh TEST_RUN_DIR STREAM_URI MAX_CLIENTS"
-      exit 
-fi
-
-# create test directories
-TEST_FOLDER=$(date +%Y%m%d%H%M%S) 
-TEST_RUN_DIR=$1
-TEST_DIR=$TEST_RUN_DIR"/streaming/"$TEST_FOLDER
-echo "Test directory: "$TEST_DIR
-mkdir -p  "$TEST_DIR"
-
-# run testplan
-cd "$TEST_DIR"
-
-#jmeter -n -LDEBUG -t /vagrant/test/streaming/testplan.jmx -l results.jtx -j jmeter.log
-
-# quick bash equivalent in case Jmeter fails
-STREAM_URI=$2
-COUNTER=0
-MAX_CLIENTS=$3
-while [ $COUNTER -lt $MAX_CLIENTS ]; do
-  # run cvlc headless, redirect stderr into stdout, pipe that into the report.sh script
-  cvlc -Vdummy --no-audio $STREAM_URI 2>&1 | /vagrant/test/streaming/report.sh ${COUNTER} &
-  sleep 1
-  let COUNTER=COUNTER+1 
-done
-
-
-
diff --git a/test/streaming/setupCLMC.sh b/test/streaming/setupCLMC.sh
deleted file mode 100644
index 6d2bd38390aca17ad2ad89a2debb6d5f89eab794..0000000000000000000000000000000000000000
--- a/test/streaming/setupCLMC.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-#/////////////////////////////////////////////////////////////////////////
-#//
-#// (c) University of Southampton IT Innovation Centre, 2018
-#//
-#// Copyright in this software belongs to University of Southampton
-#// IT Innovation Centre of Gamma House, Enterprise Road,
-#// Chilworth Science Park, Southampton, SO16 7NS, UK.
-#//
-#// This software may not be used, sold, licensed, transferred, copied
-#// or reproduced in whole or in part in any manner or form or in or
-#// on any media by any person other than in accordance with the terms
-#// of the Licence Agreement supplied with the software, or otherwise
-#// without the prior written consent of the copyright owners.
-#//
-#// This software is distributed WITHOUT ANY WARRANTY, without even the
-#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-#// PURPOSE, except where stated in the Licence Agreement supplied with
-#// the software.
-#//
-#//      Created By :            Simon Crowle
-#//      Created Date :          14/02/2018
-#//      Created for Project :   FLAME
-#//
-#/////////////////////////////////////////////////////////////////////////
-
-TEST_DIR=$1
-
-# copy Kapacitor conf to /etc/kapacitor and restart
-
-systemctl stop kapacitor
-echo $TEST_DIR"/kapacitor.conf"
-cp $TEST_DIR/kapacitor.conf /etc/kapacitor/kapacitor.conf
-systemctl start kapacitor
-
-# wait for kapacitor to restart
-# TODO: do this better
-sleep 5
-
-# Set up Influx data source
-curl -i -X POST -H "Content-Type: application/json" http://localhost:8888/chronograf/v1/sources -d @$TEST_DIR/influx.json
-
-# Set up Kapacitor
-curl -i -X POST -H "Content-Type: application/json" http://localhost:8888/chronograf/v1/sources/1/kapacitors -d @$TEST_DIR/kapacitor.json
-
-# Set up rules
-curl -i -X POST -H "Content-Type: application/json" http://localhost:9092/kapacitor/v1/tasks -d @$TEST_DIR/rules.json
-
-# Set up dashboard
-curl -i -X POST -H "Content-Type: application/json" http://localhost:8888/chronograf/v1/dashboards -d @$TEST_DIR/dashboard.json
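After a setup script like this runs, the registrations can be verified with GET requests against the same endpoints; a sketch assuming the response shapes of the Kapacitor and Chronograf 1.4 APIs:

```python
import requests

# the rule posted above should appear in the Kapacitor task list
tasks = requests.get("http://localhost:9092/kapacitor/v1/tasks").json()
print([t["id"] for t in tasks.get("tasks", [])])

# the InfluxDB data source should appear in the Chronograf source list
sources = requests.get("http://localhost:8888/chronograf/v1/sources").json()
print([s["name"] for s in sources.get("sources", [])])
```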
diff --git a/test/streaming/setupNGINX.sh b/test/streaming/setupNGINX.sh
deleted file mode 100644
index 3833350c7e2a157538c5014d12195627c5aaf538..0000000000000000000000000000000000000000
--- a/test/streaming/setupNGINX.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/bash
-#/////////////////////////////////////////////////////////////////////////
-#//
-#// (c) University of Southampton IT Innovation Centre, 2018
-#//
-#// Copyright in this software belongs to University of Southampton
-#// IT Innovation Centre of Gamma House, Enterprise Road,
-#// Chilworth Science Park, Southampton, SO16 7NS, UK.
-#//
-#// This software may not be used, sold, licensed, transferred, copied
-#// or reproduced in whole or in part in any manner or form or in or
-#// on any media by any person other than in accordance with the terms
-#// of the Licence Agreement supplied with the software, or otherwise
-#// without the prior written consent of the copyright owners.
-#//
-#// This software is distributed WITHOUT ANY WARRANTY, without even the
-#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-#// PURPOSE, except where stated in the Licence Agreement supplied with
-#// the software.
-#//
-#//      Created By :            Simon Crowle
-#//      Created Date :          14/02/2018
-#//      Created for Project :   FLAME
-#//
-#/////////////////////////////////////////////////////////////////////////
-
-# NGINX
-DEST_DIR="/usr/share/nginx/html"
-
-TEST_VIDEO="20180212104221flame-project-full.mp4"
-TEST_VIDEO_ARCHIVE=$TEST_VIDEO".gz"
-DEST_FILE=$DEST_DIR"/"$TEST_VIDEO_ARCHIVE
-
-echo "ftp://ftp.it-innovation.soton.ac.uk/testdata/video/"$TEST_VIDEO_ARCHIVE
-
-# Copy files for MPEG-DASH testing
-curl "ftp://ftp.it-innovation.soton.ac.uk/testdata/video/"$TEST_VIDEO_ARCHIVE --user flame-rw:DR8ngj3ogSjd8gl -o $DEST_FILE
-tar -xvf $DEST_FILE -C $DEST_DIR
-
-rm -rf $DEST_FILE
-mv $DEST_DIR"/"$TEST_VIDEO $DEST_DIR"/"test_video
diff --git a/test/streaming/stop.sh b/test/streaming/stop.sh
deleted file mode 100755
index b332fe3b1d7d1e9ff2e974cb59b036e2252d0bc9..0000000000000000000000000000000000000000
--- a/test/streaming/stop.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-
-for pid in $(ps -ef | grep "/usr/bin/vlc" | awk '{print $2}'); do kill -9 $pid; done
-# TODO: 'killall vlc' should work: need to test though
\ No newline at end of file
diff --git a/test/streaming/test_rspec.py b/test/streaming/test_rspec.py
deleted file mode 100644
index 0bbea5403b59178f609661eb2f4fd280822b5b74..0000000000000000000000000000000000000000
--- a/test/streaming/test_rspec.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/python3
-
-from subprocess import run
-from platform import system
-import pytest
-
-
-@pytest.mark.parametrize("service_name", [
-    'clmc-service',
-    'nginx1',
-    'nginx2',
-    'loadtest-streaming'
-])
-def test_service_names(streaming_config, service_name):
-    """
-    Tests the service names in the configuration.
-
-    :param streaming_config: the configuration fixture collected from conftest.py
-    :param service_name: the service name to test
-    """
-
-    assert any(s['name'] == service_name for s in streaming_config['hosts']), "{0} not in list of hosts".format(service_name)
-    print("\nSuccessfully passed configuration test for service name {0}\n".format(service_name))
-
-
-def test_ping(streaming_config):
-    """
-    Pings each service to test for liveness
-
-    :param streaming_config: the configuration fixture collected from conftest.py
-    """
-
-    print("\n")  # blank line printed for formatting purposes
-
-    ping_count = 1
-    system_dependent_param = "-n" if system().lower() == "windows" else "-c"
-
-    for service in streaming_config['hosts']:
-        command = ["ping", system_dependent_param, str(ping_count), service['ip_address']]
-        assert run(command).returncode == 0, "Service ping test failed for {0} with ip address {1}".format(service['name'], service['ip_address'])
-        print("\nSuccessfully passed ping test for service: {0}\n".format(service['name']))
diff --git a/test/streaming/test_streaming.py b/test/streaming/test_streaming.py
deleted file mode 100644
index c1b7f77190e144a8c77a63c9f83d63e62d1fd1ad..0000000000000000000000000000000000000000
--- a/test/streaming/test_streaming.py
+++ /dev/null
@@ -1,177 +0,0 @@
-#!/usr/bin/python3
-
-from threading import Thread
-from time import sleep
-from queue import Queue
-from xml.etree import ElementTree
-from urllib.parse import urljoin
-from os.path import isfile, dirname, join
-from os import remove, system
-import pytest
-import requests
-import json
-
-
-class TestStreamingAlerts(object):
-    """
-    A testing class used to group all the tests related to the streaming scenario.
-    """
-
-    kapacitor_url = "http://localhost:9092/kapacitor/v1/tasks"
-
-    @pytest.mark.parametrize("rule, log", [
-        ("rules.json", "/tmp/RPSLoad.log"),
-    ])
-    def test_alerts(self, rule, log, streaming_url, streaming_manifest):
-        """
-        This test case generates some streaming requests to the server to ensure an alert is triggered and then tests the log file for this alert. Different logs can be tested by
-        appending to the list of parameters in the pytest decorator.
-
-        Format for pytest parameters under test:
-        ([filename], [log])
-        where [filename] is the name of the json file for the rule under test (must be in the same folder as this test file)
-              [log] is the absolute path of the log file that must be created due to an alert
-
-        :param rule: the name of the rule json file
-        :param log: the path of the log file that is under test
-        :param streaming_url: the fixture providing the streaming url for this test case
-        :param streaming_manifest: the fixture providing the root of the XML streaming manifest
-        """
-
-        kapacitor_setter = self.kapacitor_setting(rule)
-        next(kapacitor_setter)  # Setup the test rule
-
-        try:
-            if isfile(log):
-                remove(log)  # delete log file if existing from previous tests
-        except PermissionError:
-            system("sudo rm {0}".format(log))  # handles the case for running on linux where permission will be required to delete the old log file
-
-        segments = streaming_manifest.findall(".//{urn:mpeg:DASH:schema:MPD:2011}SegmentURL")
-
-        threads_num = 30
-        threads_queue = Queue(maxsize=threads_num)  # a synchronized queue is used to track whether all the threads have finished execution
-        threads = [StreamingThread(streaming_url, segments, threads_queue) for _ in range(threads_num)]
-        for t in threads:
-            t.start()
-
-        alert_created = False
-        while True:
-            # loop while the threads are executing, checking every 2.5 seconds whether the alert log has been created or all threads have finished
-            sleep(2.5)
-            if isfile(log):
-                for t in threads:  # kill all running threads in case log file is created beforehand
-                    t.stop()
-                alert_created = True
-
-            if threads_queue.full():
-                break
-
-        assert alert_created, "Alerts test failed: no log file is created indicating a triggered alert."
-
-        print("\nSuccessfully passed alert creation test.\n")
-
-        next(kapacitor_setter)  # Teardown the test rule
-
-    def kapacitor_setting(self, rule):
-        """
-        A generator function used to provide setUp/tearDown actions for a particular kapacitor rule.
-        On setUp the rule is registered; on tearDown the rule is deleted. Interleaving is achieved using the generator pattern.
-
-        :param rule: the name of the json file for the rule under test
-        """
-
-        # Initialization of the kapacitor rule - Test setUp (UnitTest style)
-        with open(join(dirname(__file__), rule), "r") as rule_file:
-            data = "".join(line.strip() for line in rule_file.readlines())
-
-        rule_data = json.loads(data)
-        requests.delete(url=urljoin(self.kapacitor_url + "/", rule_data.get("id")))  # delete any task with the same ID already registered in Kapacitor
-        requests.post(url=self.kapacitor_url, data=data, headers={"Content-Type": "application/json"})
-
-        yield
-
-        # Deleting the kapacitor rule used for testing - Test tearDown (UnitTest style)
-        requests.delete(url=urljoin(self.kapacitor_url + "/", rule_data.get("id")))
-        yield
-
-    @staticmethod
-    @pytest.fixture(scope="class", params=[{"server": "http://192.168.50.11", "video": "/test_video/stream.mpd"}])
-    def streaming_url(request):
-        """
-        A fixture with class scope - used only in the scope of the testing class.
-
-        :param request: the parameters for this fixture - server url and video relative url
-        :return: the combined URL for the video used for streaming
-        """
-
-        return urljoin(request.param["server"], request.param["video"])
-
-    @staticmethod
-    @pytest.fixture(scope="class")
-    def streaming_manifest(streaming_url):
-        """
-        A fixture to download the manifest file for the streamed video and parse the downloaded XML content
-
-        :param streaming_url: the fixture which provides the streaming url
-        :return: an XML root node object
-        """
-
-        manifest_xml = requests.get(streaming_url).text
-        root = ElementTree.fromstring(manifest_xml)
-        return root
-
-
-class StreamingThread(Thread):
-
-    def __init__(self, url, segments, queue):
-        """
-        Subclassing the Thread class to create a custom streaming thread.
-
-        :param url: the streaming url
-        :param segments: the list of SegmentURL XML nodes
-        :param queue: an auxiliary parameter used to indicate when this thread has finished execution
-        """
-
-        super(StreamingThread, self).__init__()
-        self.running = False
-        self.url = url
-        self.segments = segments
-        self.queue = queue
-        self._test_finished = False  # a flag to indicate whether the thread should stop running
-
-    def stop(self):
-        """
-        Signal this thread to stop; the run loop checks the flag and exits.
-        """
-
-        self._test_finished = True
-
-    def run(self):
-        """
-        Simulates actual streaming by downloading audio/video segments from the server through a requests session,
-        which keeps the connection open between requests.
-        """
-
-        size = len(self.segments)
-        size = size if size % 2 == 0 else size - 1
-
-        s = requests.session()
-
-        for i in range(size // 2):
-            segment_audio = self.segments[i]  # audio segments occupy the first half of the list
-            segment_video = self.segments[size // 2 + i]  # video segments occupy the second half
-            segment_audio_url = segment_audio.attrib.get('media')
-            segment_video_url = segment_video.attrib.get('media')
-
-            s.get(urljoin(self.url, segment_audio_url))
-            s.get(urljoin(self.url, segment_video_url))
-
-            # check if thread is killed in case the test has already succeeded
-            if self._test_finished:
-                break
-
-            # a small timeout to mimic the behaviour of real streaming
-            sleep(2.5)
-
-        self.queue.put(True)
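The `kapacitor_setting` generator above hand-rolls setup/teardown with paired `next()` calls; pytest's yield fixtures provide the same interleaving natively. A sketch of an equivalent fixture (not part of the original code):

```python
import json
from os.path import dirname, join
from urllib.parse import urljoin

import pytest
import requests

KAPACITOR_URL = "http://localhost:9092/kapacitor/v1/tasks"


@pytest.fixture(params=["rules.json"])
def kapacitor_rule(request):
    """Register the Kapacitor rule before the test and delete it afterwards."""
    with open(join(dirname(__file__), request.param)) as rule_file:
        data = rule_file.read()
    rule_id = json.loads(data)["id"]
    requests.delete(url=urljoin(KAPACITOR_URL + "/", rule_id))  # clear any stale task with the same ID
    requests.post(url=KAPACITOR_URL, data=data, headers={"Content-Type": "application/json"})
    yield rule_id
    # teardown: runs after each test that uses this fixture
    requests.delete(url=urljoin(KAPACITOR_URL + "/", rule_id))
```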
diff --git a/test/telegraf-agents/__init__.py b/test/telegraf-agents/__init__.py
deleted file mode 100644
index 44f772595799f5fe338534918c95e23e08e80464..0000000000000000000000000000000000000000
--- a/test/telegraf-agents/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-#!/usr/bin/python3
\ No newline at end of file
diff --git a/test/telegraf-agents/conftest.py b/test/telegraf-agents/conftest.py
deleted file mode 100644
index b096dd4d6875bed42e3c2842148c7b2f6db7a32d..0000000000000000000000000000000000000000
--- a/test/telegraf-agents/conftest.py
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/usr/bin/python3
-
-import pytest
-import yaml
-from influxdb import InfluxDBClient
-
-
-@pytest.fixture(scope="module", params=[{'config': {'rspec': 'test/telegraf-agents/rspec.yml'}}])
-def telegraf_agent_config(request):
-    """
-    Reads the service configuration deployed for the telegraf agents test.
-
-    :param request: access the parameters of the fixture
-    :return: the python object representing the read YAML file
-    """
-
-    with open(request.param['config']['rspec'], 'r') as stream:
-        data_loaded = yaml.safe_load(stream)  # safe_load suffices for plain configuration data
-    return data_loaded
-
-
-@pytest.fixture(params=[{'database': 'CLMCMetrics'}], scope='module')
-def influxdb(telegraf_agent_config, request):
-    """
-    Creates an Influx DB client for the CLMC metrics database
-
-    :param telegraf_agent_config: the fixture returning the yaml configuration
-    :param request: access the parameters of the fixture
-    :return: the created Influx DB client
-    """
-
-    return InfluxDBClient(host=telegraf_agent_config['hosts'][0]['ip_address'], port=8086, database=request.param['database'], timeout=10)
diff --git a/test/telegraf-agents/rspec.yml b/test/telegraf-agents/rspec.yml
deleted file mode 100644
index 6dad4be08d89d3f1bcc31e65e92e909c591d14fd..0000000000000000000000000000000000000000
--- a/test/telegraf-agents/rspec.yml
+++ /dev/null
@@ -1,103 +0,0 @@
-hosts:
-  - name: clmc-service
-    cpus: 1
-    memory: 2048
-    disk: "10GB"
-    forward_ports:
-      - guest: 8086
-        host: 8086
-      - guest: 8888
-        host: 8888
-      - guest: 9092
-        host: 9092
-    ip_address: "192.168.50.10"
-  - name: apache
-    cpus: 1
-    memory: 2048
-    disk: "10GB"
-    service_name: "apache"
-    forward_ports:
-      - guest: 80
-        host: 8881
-    ip_address: "192.168.50.11"
-    location: "DC1"
-    sfc_id: "MS_Template_1"
-    sfc_id_instance: "MS_I1"
-    sf_id: "adaptive_streaming"
-    sf_id_instance: "adaptive_streaming_I1"
-    ipendpoint_id: "adaptive_streaming_I1_apache1"
-    influxdb_url: "http://192.168.50.10:8086"
-    database_name: "CLMCMetrics"
-  - name: nginx
-    cpus: 1
-    memory: 2048
-    disk: "10GB"
-    service_name: "nginx"
-    forward_ports:
-      - guest: 80
-        host: 8082
-    ip_address: "192.168.50.13"
-    location: "DC1"
-    sfc_id: "MS_Template_1"
-    sfc_id_instance: "MS_I1"
-    sf_id: "adaptive_streaming"
-    sf_id_instance: "adaptive_streaming_nginx_I1"
-    ipendpoint_id: "adaptive_streaming_nginx_I1_apache1"
-    influxdb_url: "http://192.168.50.10:8086"
-    database_name: "CLMCMetrics"
-  - name: mongo
-    cpus: 1
-    memory: 2048
-    disk: "10GB"
-    service_name: "mongo"
-    forward_ports:
-      - guest: 80
-        host: 8083
-    ip_address: "192.168.50.14"
-    location: "DC1"
-    sfc_id: "MS_Template_1"
-    sfc_id_instance: "MS_I1"
-    sf_id: "metadata_database"
-    sf_id_instance: "metadata_database_I1"
-    ipendpoint_id: "metadata_database_I1_apache1"
-    influxdb_url: "http://192.168.50.10:8086"
-    database_name: "CLMCMetrics" 
-  - name: ffmpeg
-    cpus: 1
-    memory: 2048
-    disk: "10GB"
-    service_name: "ffmpeg"
-    forward_ports:
-      - guest: 80
-        host: 8084
-    ip_address: "192.168.50.15"
-    location: "DC1"
-    sfc_id: "MS_Template_1"
-    sfc_id_instance: "MS_I1"
-    sf_id: "metadata_database"
-    sf_id_instance: "metadata_database_I1"
-    ipendpoint_id: "metadata_database_I1_apache1"
-    influxdb_url: "http://192.168.50.10:8086"
-    database_name: "CLMCMetrics" 
-  - name: host
-    cpus: 1
-    memory: 2048
-    disk: "10GB"
-    service_name: "host"
-    forward_ports:
-      - guest: 80
-        host: 8085
-    ip_address: "192.168.50.16"
-    location: "DC1"
-    sfc_id: "MS_Template_1"
-    sfc_id_instance: "MS_I1"
-    sf_id: "adaptive_streaming"
-    sf_id_instance: "adaptive_streaming_I1"
-    ipendpoint_id: "adaptive_streaming_I1_apache1"
-    influxdb_url: "http://192.168.50.10:8086"
-    database_name: "CLMCMetrics"
-  - name: test-runner
-    cpus: 1
-    memory: 2048
-    disk: "10GB"
-    ip_address: "192.168.50.17"    
\ No newline at end of file
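The per-host fields above (location, sfc_id, ipendpoint_id, ...) are what the CLMC install script is expected to write into Telegraf's [global_tags] section so that every measurement from a host carries them as tags. A sketch of the resulting telegraf.conf fragment for the apache host; only `ipendpoint` is confirmed as a tag key by the queries in this patch, the remaining key names are assumptions mirroring the rspec fields:

```toml
[global_tags]
  # the tag filtered on by the Kapacitor queries earlier in this patch
  ipendpoint = "adaptive_streaming_I1_apache1"
  # remaining keys mirror the rspec fields; exact names are assumptions
  location = "DC1"
  sfc_id = "MS_Template_1"
  sfc_id_instance = "MS_I1"
  sf_id = "adaptive_streaming"
  sf_id_instance = "adaptive_streaming_I1"
```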
diff --git a/test/telegraf-agents/test_rspec.py b/test/telegraf-agents/test_rspec.py
deleted file mode 100644
index 5442eed2d366f64c32e8896f87de02f662e21dae..0000000000000000000000000000000000000000
--- a/test/telegraf-agents/test_rspec.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/python3
-
-from subprocess import run
-from platform import system
-import pytest
-
-
-@pytest.mark.parametrize("service_name", [
-    'clmc-service',
-    'apache',
-    'nginx',
-    'mongo',
-    'ffmpeg',
-    'host'
-])
-def test_service_name(telegraf_agent_config, service_name):
-    """
-    Tests the service names in the configuration.
-
-    :param telegraf_agent_config: the configuration fixture collected from conftest.py
-    :param service_name: the service name to test
-    """
-
-    assert any(s['name'] == service_name for s in telegraf_agent_config['hosts']), "{0} not in list of hosts".format(service_name)
-    print("\nSuccessfully passed configuration test for service name {0}\n".format(service_name))
-
-
-def test_ping(telegraf_agent_config):
-    """
-    Pings each service to test for liveness
-
-    :param telegraf_agent_config: the configuration fixture collected from conftest.py
-    """
-
-    print("\n")  # blank line printed for formatting purposes
-
-    ping_count = 1
-    system_dependent_param = "-n" if system().lower() == "windows" else "-c"
-
-    for service in telegraf_agent_config['hosts']:
-        command = ["ping", system_dependent_param, str(ping_count), service['ip_address']]
-        assert run(command).returncode == 0, "Service ping test failed for {0} with ip address {1}".format(service['name'], service['ip_address'])
-        print("\nSuccessfully passed ping test for service: {0}\n".format(service['name']))
diff --git a/test/telegraf-agents/test_telegraf_agents.py b/test/telegraf-agents/test_telegraf_agents.py
deleted file mode 100644
index b2e8ce59e229dc1862f200e833e946c6c6272d2e..0000000000000000000000000000000000000000
--- a/test/telegraf-agents/test_telegraf_agents.py
+++ /dev/null
@@ -1,76 +0,0 @@
-#!/usr/bin/python3
-
-import pytest
-from subprocess import run
-from platform import system
-from influxdb import InfluxDBClient
-
-@pytest.mark.parametrize("service_name", [
-    'clmc-service',
-    'apache',
-    'nginx',
-    'mongo',
-    'ffmpeg',
-    'host'
-    ])
-def test_service_name(telegraf_agent_config, service_name):
-    """Tests that the given service name is present in the deployed configuration."""
-    assert any(s['name'] == service_name for s in telegraf_agent_config['hosts']), "{0} not in list of hosts".format(service_name)
-    
-def test_ping(telegraf_agent_config):
-    """
-    Pings each service to test for liveness
-
-    :param telegraf_agent_config: the configuration fixture collected from conftest.py
-    """
-
-    print("\n")  # blank line printed for formatting purposes
-
-    ping_count = 1
-    system_dependent_param = "-n" if system().lower() == "windows" else "-c"
-
-    for service in telegraf_agent_config['hosts']:
-        command = ["ping", system_dependent_param, str(ping_count), service['ip_address']]
-        assert run(command).returncode == 0, "Service ping test failed for {0} with ip address {1}".format(service['name'], service['ip_address'])
-        print("\nSuccessfully passed ping test for service: {0}\n".format(service['name']))
-
-
-@pytest.mark.parametrize("measurement, query, expected_result", [
-    ('nginx', 'SELECT mean("requests") AS "mean" FROM "CLMCMetrics"."autogen"."nginx"', 0),
-    ('cpu', 'SELECT mean("usage_idle") AS "mean" FROM "CLMCMetrics"."autogen"."cpu"', 0),
-    ('mongodb', 'SELECT mean("net_in_bytes") AS "mean" FROM "CLMCMetrics"."autogen"."mongodb"', 0),
-    ('net', 'SELECT mean("bytes_sent") AS "mean" FROM "CLMCMetrics"."autogen"."net"', 0),
-    ('disk', 'SELECT mean("free") AS "mean" FROM "CLMCMetrics"."autogen"."disk"', 0),
-    ('diskio', 'SELECT mean("write_bytes") AS "mean" FROM "CLMCMetrics"."autogen"."diskio"', 0),
-    ('mem', 'SELECT mean("free") AS "mean" FROM "CLMCMetrics"."autogen"."mem"', 0)              
-    ])
-def test_all_inputs(influxdb, measurement, query, expected_result):
-    """
-    Tests measurements are received from an input plugin aggregated across all services
-
-    :param influxdb: the influx db client fixture
-    :param measurement: the measurement to test
-    :param query: the query to execute
-    :param expected_result: the expected result from the query
-    """
-
-    query_result = influxdb.query('SHOW measurements ON "CLMCMetrics"')
-    points = list(query_result.get_points())
-    assert any(p['name'] == measurement for p in points), "{0} not in measurement list".format(measurement)
-    
-    query_result = influxdb.query(query)
-    points = next(query_result.get_points())  # get_points() returns a generator, to take the first element we can use the next() function
-    actual_result = points['mean']
-    assert actual_result > expected_result, "actual result {0} is not > expected result {1} for query {2}".format(actual_result, str(expected_result), query)
-
-
-@pytest.mark.parametrize("query, expected_result", 
-    [('filter query', 0),
-     ('filter query', 0),
-     ('filter query', 0)
-    ])
-def test_global_tag_filtering(influxdb, query, expected_result):
-    """
-    Placeholder: tests that the global tags are inserted correctly into the Telegraf configuration by the CLMC install script.
-    """
-    # TODO: run the filter query and check the result once the queries are defined
-    assert 1
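For when the placeholder parameters above are filled in, tag filtering can be exercised by constraining the aggregation queries on the global tags. A sketch reusing the `test_all_inputs` pattern; tag keys other than `ipendpoint` are assumptions based on the rspec fields:

```python
@pytest.mark.parametrize("query, expected_result", [
    ("""SELECT mean("usage_idle") AS "mean" FROM "CLMCMetrics"."autogen"."cpu" WHERE "location"='DC1'""", 0),
    ("""SELECT mean("active") AS "mean" FROM "CLMCMetrics"."autogen"."nginx" WHERE "ipendpoint"='adaptive_streaming_nginx_I1_apache1'""", 0),
])
def test_tag_filtering(influxdb, query, expected_result):
    """Mean values should still be positive when restricted to a single tag value."""
    points = list(influxdb.query(query).get_points())
    assert points, "tag-filtered query returned no data: {0}".format(query)
    assert points[0]['mean'] > expected_result
```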