diff --git a/.gitignore b/.gitignore
index 0476cfee01b91a5923d5646c7739608e904b73e3..07e683d3972836af2d108f67b090eaa53a5eaf08 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,4 +2,5 @@
 /src/clmc-spec/nbproject/
 /src/clmc-spec/target/
 .vagrant/
-.log
\ No newline at end of file
+.log
+ubuntu-xenial-16.04-cloudimg-console.log
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..22b603ef9178dffb15c49eef1e802a5e807a0266
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,59 @@
+Copyright (c) 2017-2018 University of Southampton IT Innovation Centre
+
+Licence to use IT Innovation Software for carrying out the FLAME Project
+
+DEFINITIONS
+
+"Grant Agreement" means EC Grant Agreement 731677 — FLAME — 
+H2020-ICT-2016-2017/H2020-ICT-2016-1, relating to the Project 
+FLAME: Facility for Largescale Adaptive Media Experimentation (FLAME).
+
+"Software" means ‘Cross Layer Management and Control’ or CLMC, and any 
+documentation and any error corrections provided by IT Innovation.
+
+"IT Innovation" means the University of Southampton acting through the 
+IT Innovation Centre of Gamma House, Enterprise Road, Southampton SO16 7NS, UK.
+
+"You" means any Contractor identified in the Grant Agreement.
+
+Words defined in the Grant Agreement or in the Consortium Agreement have 
+the same meaning in this Licence
+
+ACCEPTANCE
+
+By using the Software, You accept the terms of this Licence.
+
+INTELLECTUAL PROPERTY RIGHTS	
+
+The Software is IT Innovation Knowledge. The Software is confidential and 
+copyrighted. Title to the Software and all associated intellectual property 
+rights are retained by IT Innovation.
+
+LICENCE	
+
+IT Innovation grants You a free non-exclusive and non-transferable licence
+giving You Access Rights to the Software for carrying out the Project, as 
+set out in the Grant Agreement Article 31 and the Consortium Agreement 
+section 9.
+
+RESTRICTIONS
+
+This Licence specifically excludes Access Rights for Use outside the Project 
+as set out in the Consortium Agreement section 9. You may not assign or 
+transfer this Licence. You may not sublicense the Software. You may not make 
+copies of the Software, other than for carrying out the Project and for backup 
+purposes. You may not modify the Software.
+
+LIABILITY
+
+This Licence implies no warranty, as set out in the Consortium Agreement 
+section 5.
+
+TERMINATION
+
+This Licence is effective until the end of the Project.  You may terminate 
+this Licence at any time by written notice to IT Innovation. This Licence 
+will terminate immediately without notice from IT Innovation if You fail to 
+comply with any provision of this Licence. Upon Termination, You must destroy 
+all copies of the Software.
+
diff --git a/Vagrantfile b/Vagrantfile
index e7b2319968c38bf4ea6b9fd42e5294a59634a76d..a38600d3d6619e996dba8e279526fb7cfc65bb49 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -22,160 +22,83 @@
 #//      Created for Project :   FLAME
 #//
 #/////////////////////////////////////////////////////////////////////////
-# Define ipendpoint configuration parameters
-
-ipendpoints = {
-  "apache1" => {
-    :ip_address => "192.168.50.11",
-    :location => "DC1",
-    :sfc_id => "MS_Template_1",
-    :sfc_id_instance => "MS_I1",
-    :sf_id => "adaptive_streaming",
-    :sf_id_instance => "adaptive_streaming_I1",
-    :ipendpoint_id => "adaptive_streaming_I1_apache1",
-    :influxdb_url => "http://192.168.50.10:8086",
-    :database_name => "CLMCMetrics"
-  },
-  "apache2" => { 
-    :ip_address => "192.168.50.12",
-    :location => "DC2",
-    :sfc_id => "MS_Template_1",
-    :sfc_id_instance => "MS_I1",
-    :sf_id => "adaptive_streaming",
-    :sf_id_instance => "adaptive_streaming_I1",
-    :ipendpoint_id => "adaptive_streaming_I1_apache2",
-    :influxdb_url => "http://192.168.50.10:8086",
-    :database_name => "CLMCMetrics"      
-  },
-  "nginx1" => {
-    :ip_address => "192.168.50.13",
-    :location => "DC1",
-    :sfc_id => "MS_Template_1",
-    :sfc_id_instance => "MS_I1",
-    :sf_id => "adaptive_streaming",
-    :sf_id_instance => "adaptive_streaming_nginx_I1",
-    :ipendpoint_id => "adaptive_streaming_nginx_I1_apache1",
-    :influxdb_url => "http://192.168.50.10:8086",
-    :database_name => "CLMCMetrics"
-  },
-  "mongo1" => { 
-    :ip_address => "192.168.50.14",
-    :location => "DC1",
-    :sfc_id => "MS_Template_1",
-    :sfc_id_instance => "MS_I1",
-    :sf_id => "metadata_database",
-    :sf_id_instance => "metadata_database_I1",
-    :ipendpoint_id => "metadata_database_I1_apache1",
-    :influxdb_url => "http://192.168.50.10:8086",
-    :database_name => "CLMCMetrics"      
-  }
+# Requirements
+require 'getoptlong'
+require 'yaml'
+
+# Custom options:
+#   --infra <infradir>
+
+# Set defaults
+DEFAULT_INFRA = "streaming"
+
+# Define custom options
+opts = GetoptLong.new(
+  [ '--infra', GetoptLong::OPTIONAL_ARGUMENT]
+)
+
+# Retrieve custom option values
+infra = DEFAULT_INFRA
+opts.each do |opt, arg|
+ case opt
+   when '--infra'
+    infra = arg    
+ end
+end
 
-}
+# load custom config file
+puts "loading custom infrastructure configuration: #{infra}"
+puts "custom config file: /infra/#{infra}/rspec.yml"
+host_rspec_file = "infra/#{infra}/rspec.yml"
+hosts = YAML.load_file(host_rspec_file)
 
+# Start creating VMS using xenial64 as the base box
 Vagrant.configure("2") do |config|
   config.vm.box = "ubuntu/xenial64"
- 
-  config.vm.define "clmc-service" do |my|
-
-      config.vm.network :private_network, ip: "192.168.50.10", virtualbox__intnet: "clmc-net"
-
-      my.vm.provider "virtualbox" do |v|
-        v.customize ["modifyvm", :id, "--memory", 2048]
-        v.customize ["modifyvm", :id, "--cpus", 1]
+  #config.vm.box = "hashicorp/precise32"
+
+  # Dynamic VMs
+  hosts['hosts'].each do |host|
+    #p host["name"]
+    instance_name = host["name"]
+    config.vm.define instance_name do |instance_config|
+
+      # Specify VM properties
+      instance_config.vm.hostname = instance_name
+      instance_config.disksize.size = host["disk"]
+      instance_config.vm.provider "virtualbox" do |v|
+        v.customize ["modifyvm", :id, "--memory", host["memory"]]
+        v.customize ["modifyvm", :id, "--cpus", host["cpus"]]
       end
 
-      # open InfluxDB port
-      config.vm.network "forwarded_port", guest: 8086, host: 8086 
-
-      # open Chronograf port
-      config.vm.network "forwarded_port", guest: 8888, host: 8888
-
-      # open Kapacitor port
-      config.vm.network "forwarded_port", guest: 9092, host: 9092
-
-      # install the CLMC service
-      config.vm.provision :shell, :path => 'scripts/influx/install-clmc-service.sh'
+      # Configure network; note that we only expect 1 test to be running, so we have one internal network
+      instance_config.vm.network :private_network, ip: "#{host["ip_address"]}", virtualbox__intnet: "clmc-net"
 
-      # start the CLMC service
-      config.vm.provision :shell, :path => 'scripts/influx/start-clmc-service.sh'
-  end  
-  # Apache Server 1
-  config.vm.define "apache1" do |my|
-      config.vm.network :private_network, ip: "#{ipendpoints['apache1'][:ip_address]}", virtualbox__intnet: "clmc-net"
-
-      my.vm.provider "virtualbox" do |v|
-        v.customize ["modifyvm", :id, "--memory", 512]
-        v.customize ["modifyvm", :id, "--cpus", 1]
+      # Port forwarding
+      puts "Forwarding the following specified ports for #{host["name"]}:"
+      host['forward_ports'].each do |port|
+        puts "Forwarding guest:#{port["guest"]} => host:#{port["host"]}"
+        instance_config.vm.network "forwarded_port", guest: port["guest"], host: port["host"]
       end
-
-      # open apache port
-      config.vm.network "forwarded_port", guest: 80, host: 8080
-
-      # install the apache  service
-      config.vm.provision :shell, :path => 'scripts/apache/install-apache.sh'
-
-      # Install CLMC agent 1
-      config.vm.provision :shell, :path => 'scripts/influx/install-clmc-agent.sh', :args => "/vagrant/scripts/apache/telegraf_apache_template.conf #{ipendpoints['apache1'][:location]} #{ipendpoints['apache1'][:sfc_id]} #{ipendpoints['apache1'][:sfc_id_instance]} #{ipendpoints['apache1'][:sf_id]} #{ipendpoints['apache1'][:sf_id_instance]} #{ipendpoints['apache1'][:ipendpoint_id]} #{ipendpoints['apache1'][:influxdb_url]} #{ipendpoints['apache1'][:database_name]}"
-  end    
-  # Apache Server 2
-  config.vm.define "apache2" do |my|
-
-      config.vm.network :private_network, ip: "#{ipendpoints['apache2'][:ip_address]}", virtualbox__intnet: "clmc-net"
-
-      my.vm.provider "virtualbox" do |v|
-        v.customize ["modifyvm", :id, "--memory", 512]
-        v.customize ["modifyvm", :id, "--cpus", 1]
+      
+      # Switch case added here to make clmc-service provisioning simple without having to have a complex rspec.yml file
+      # We only run a service installation script and the agent installation script when creating a specific service VM, not the clmc-service VM
+      
+      puts "Instance name #{instance_name}:"
+      case instance_name
+        when 'clmc-service'
+          instance_config.vm.provision :shell, :path => "scripts/clmc-service/install.sh"
+        else
+          # specific service install
+          instance_config.vm.provision :shell, :path => "test/services/#{host["service_name"]}/install.sh", env: {"REPO_ROOT" => "/vagrant"}
+    
+          # CLMC agent install
+          instance_config.vm.provision :shell, :path => "scripts/clmc-agent/install.sh", :args => "/vagrant/test/services/#{host["service_name"]}/telegraf_template.conf #{host["location"]} #{host["sfc_id"]} #{host["sfc_id_instance"]} #{host["sf_id"]} #{host["sf_id_instance"]} #{host["ipendpoint_id"]} #{host["influxdb_url"]} #{host["database_name"]}"
       end
 
-      # open apache port
-      config.vm.network "forwarded_port", guest: 80, host: 8081 
-
-      # install the apache  service
-      config.vm.provision :shell, :path => 'scripts/apache/install-apache.sh'
-
-      # Install CLMC agent
-      config.vm.provision :shell, :path => 'scripts/influx/install-clmc-agent.sh', :args => "/vagrant/scripts/apache/telegraf_apache_template.conf #{ipendpoints['apache2'][:location]} #{ipendpoints['apache2'][:sfc_id]} #{ipendpoints['apache2'][:sfc_id_instance]} #{ipendpoints['apache2'][:sf_id]} #{ipendpoints['apache2'][:sf_id_instance]} #{ipendpoints['apache2'][:ipendpoint_id]} #{ipendpoints['apache2'][:influxdb_url]} #{ipendpoints['apache2'][:database_name]}"      
+      
+	  end
   end
-  
-  # NGINX VM
-  
-  config.vm.define "NGINX_Service" do |my|
-
-      config.vm.network :private_network, ip: "#{ipendpoints['nginx1'][:ip_address]}", virtualbox__intnet: "clmc-net"
-
-      my.vm.provider "virtualbox" do |v|
-        v.customize ["modifyvm", :id, "--memory", 512]
-        v.customize ["modifyvm", :id, "--cpus", 1]
-      end
-
-      # open apache port
-      config.vm.network "forwarded_port", guest: 80, host: 8081 
-
-      # install the apache  service
-      config.vm.provision :shell, :path => 'scripts/apache/install-nginx.sh'
-
-      # Install CLMC agent
-      config.vm.provision :shell, :path => 'scripts/influx/install-clmc-agent.sh', :args => "/vagrant/scripts/nginx/telegraf_nginx_template.conf #{ipendpoints['nginx1'][:location]} #{ipendpoints['nginx1'][:sfc_id]} #{ipendpoints['nginx1'][:sfc_id_instance]} #{ipendpoints['nginx1'][:sf_id]} #{ipendpoints['nginx1'][:sf_id_instance]} #{ipendpoints['nginx1'][:ipendpoint_id]} #{ipendpoints['nginx1'][:influxdb_url]} #{ipendpoints['nginx1'][:database_name]}"      
-  end
-  
-  # MONGODB VM
-  config.vm.define "MONGO_Service" do |my|
-
-      config.vm.network :private_network, ip: "#{ipendpoints['mongo1'][:ip_address]}", virtualbox__intnet: "clmc-net"
-
-      my.vm.provider "virtualbox" do |v|
-        v.customize ["modifyvm", :id, "--memory", 512]
-        v.customize ["modifyvm", :id, "--cpus", 1]
-      end
-
-      # open apache port
-      config.vm.network "forwarded_port", guest: 80, host: 8081 
-
-      # install the apache  service
-      config.vm.provision :shell, :path => 'scripts/apache/install-mongo.sh'
-
-      # Install CLMC agent
-      config.vm.provision :shell, :path => 'scripts/influx/install-clmc-agent.sh', :args => "/vagrant/scripts/mongo/telegraf_mongo_template.conf #{ipendpoints['mongo1'][:location]} #{ipendpoints['mongo1'][:sfc_id]} #{ipendpoints['mongo1'][:sfc_id_instance]} #{ipendpoints['mongo1'][:sf_id]} #{ipendpoints['mongo1'][:sf_id_instance]} #{ipendpoints['mongo1'][:ipendpoint_id]} #{ipendpoints['mongo1'][:influxdb_url]} #{ipendpoints['mongo1'][:database_name]}"      
-  end
-  
+ 
+  # NOTE(review): an unresolved merge-conflict marker (">>>>>>> a71470f...") was left here; removed — verify the merge was resolved correctly
 end
diff --git a/infra/full/rspec.yml b/infra/full/rspec.yml
new file mode 100644
index 0000000000000000000000000000000000000000..06acf9ac6c4421600da2fabd9d00ecd901a1432d
--- /dev/null
+++ b/infra/full/rspec.yml
@@ -0,0 +1,99 @@
+hosts:
+  - name: clmc-service
+    cpus: 1
+    memory: 2048
+    disk: "10GB"
+    forward_ports:
+      - guest: 8086
+        host: 8086
+      - guest: 8888
+        host: 8888
+      - guest: 9092
+        host: 9092
+    ip_address: "192.168.50.10"
+  - name: apache1
+    cpus: 1
+    memory: 2048
+    disk: "10GB"
+    service_name: "apache"
+    forward_ports:
+      - guest: 80
+        host: 8081
+    ip_address: "192.168.50.11"
+    location: "DC1"
+    sfc_id: "MS_Template_1"
+    sfc_id_instance: "MS_I1"
+    sf_id: "adaptive_streaming"
+    sf_id_instance: "adaptive_streaming_I1"
+    ipendpoint_id: "adaptive_streaming_I1_apache1"
+    influxdb_url: "http://192.168.50.10:8086"
+    database_name: "CLMCMetrics"
+  - name: apache2
+    cpus: 1
+    memory: 2048
+    disk: "10GB"
+    service_name: "apache"
+    forward_ports:
+      - guest: 80
+        host: 8082
+    ip_address: "192.168.50.12"
+    location: "DC2"
+    sfc_id: "MS_Template_1"
+    sfc_id_instance: "MS_I1"
+    sf_id: "adaptive_streaming"
+    sf_id_instance: "adaptive_streaming_I1"
+    ipendpoint_id: "adaptive_streaming_I1_apache2"
+    influxdb_url: "http://192.168.50.10:8086"
+    database_name: "CLMCMetrics"      
+    
+  - name: nginx
+    cpus: 1
+    memory: 2048
+    disk: "10GB"
+    service_name: "nginx"
+    forward_ports:
+      - guest: 80
+        host: 8083
+    ip_address: "192.168.50.13"
+    location: "DC1"
+    sfc_id: "MS_Template_1"
+    sfc_id_instance: "MS_I1"
+    sf_id: "adaptive_streaming"
+    sf_id_instance: "adaptive_streaming_nginx_I1"
+    ipendpoint_id: "adaptive_streaming_nginx_I1_apache1"
+    influxdb_url: "http://192.168.50.10:8086"
+    database_name: "CLMCMetrics"
+  - name: mongo
+    cpus: 1
+    memory: 2048
+    disk: "10GB"
+    service_name: "mongo"
+    forward_ports:
+      - guest: 80
+        host: 8084
+    ip_address: "192.168.50.14"
+    location: "DC1"
+    sfc_id: "MS_Template_1"
+    sfc_id_instance: "MS_I1"
+    sf_id: "metadata_database"
+    sf_id_instance: "metadata_database_I1"
+    ipendpoint_id: "metadata_database_I1_apache1"
+    influxdb_url: "http://192.168.50.10:8086"
+    database_name: "CLMCMetrics" 
+  - name: ffmpeg
+    cpus: 1
+    memory: 2048
+    disk: "10GB"
+    service_name: "ffmpeg"
+    forward_ports:
+      - guest: 80
+        host: 8085
+    ip_address: "192.168.50.14"
+    location: "DC1"
+    sfc_id: "MS_Template_1"
+    sfc_id_instance: "MS_I1"
+    sf_id: "metadata_database"
+    sf_id_instance: "metadata_database_I1"
+    ipendpoint_id: "metadata_database_I1_apache1"
+    influxdb_url: "http://192.168.50.10:8086"
+    database_name: "CLMCMetrics" 
diff --git a/infra/streaming-sim/rspec.yml b/infra/streaming-sim/rspec.yml
new file mode 100644
index 0000000000000000000000000000000000000000..cd62eebf3f4e06842d28f0ae76d25f9f205a18b7
--- /dev/null
+++ b/infra/streaming-sim/rspec.yml
@@ -0,0 +1,47 @@
+hosts:
+  - name: clmc-service
+    cpus: 1
+    memory: 2048
+    disk: "10GB"
+    forward_ports:
+      - guest: 8086
+        host: 8086
+      - guest: 8888
+        host: 8888
+      - guest: 9092
+        host: 9092
+    ip_address: "192.168.50.10"
+  - name: ipendpoint1
+    cpus: 1
+    memory: 2048
+    disk: "10GB"
+    service_name: "ipendpoint"
+    forward_ports:
+      - guest: 80
+        host: 8081
+    ip_address: "192.168.50.11"
+    location: "DC1"
+    sfc_id: "MS_Template_1"
+    sfc_id_instance: "MS_I1"
+    sf_id: "adaptive_streaming"
+    sf_id_instance: "adaptive_streaming_I1"
+    ipendpoint_id: "adaptive_streaming_I1_apache1"
+    influxdb_url: "http://192.168.50.10:8086"
+    database_name: "CLMCMetrics"
+  - name: ipendpoint2
+    cpus: 1
+    memory: 2048
+    disk: "10GB"
+    service_name: "ipendpoint"
+    forward_ports:
+      - guest: 80
+        host: 8082
+    ip_address: "192.168.50.12"
+    location: "DC2"
+    sfc_id: "MS_Template_1"
+    sfc_id_instance: "MS_I1"
+    sf_id: "adaptive_streaming"
+    sf_id_instance: "adaptive_streaming_I1"
+    ipendpoint_id: "adaptive_streaming_I1_apache2"
+    influxdb_url: "http://192.168.50.10:8086"
+    database_name: "CLMCMetrics"      
\ No newline at end of file
diff --git a/infra/streaming/rspec.yml b/infra/streaming/rspec.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8cbcda904a502387f9d9e95029e0c3d3132558fe
--- /dev/null
+++ b/infra/streaming/rspec.yml
@@ -0,0 +1,64 @@
+hosts:
+  - name: clmc-service
+    cpus: 1
+    memory: 2048
+    disk: "10GB"
+    forward_ports:
+      - guest: 8086
+        host: 8086
+      - guest: 8888
+        host: 8888
+      - guest: 9092
+        host: 9092
+    ip_address: "192.168.50.10"
+  - name: apache1
+    cpus: 2
+    memory: 4096
+    disk: "10GB"
+    service_name: "apache"
+    forward_ports:
+      - guest: 80
+        host: 8081
+    ip_address: "192.168.50.11"
+    location: "DC1"
+    sfc_id: "MS_Template_1"
+    sfc_id_instance: "MS_I1"
+    sf_id: "adaptive_streaming"
+    sf_id_instance: "adaptive_streaming_I1"
+    ipendpoint_id: "adaptive_streaming_I1_apache1"
+    influxdb_url: "http://192.168.50.10:8086"
+    database_name: "CLMCMetrics"
+  - name: apache2
+    cpus: 2
+    memory: 4096
+    disk: "10GB"
+    service_name: "apache"
+    forward_ports:
+      - guest: 80
+        host: 8082
+    ip_address: "192.168.50.12"
+    location: "DC2"
+    sfc_id: "MS_Template_1"
+    sfc_id_instance: "MS_I1"
+    sf_id: "adaptive_streaming"
+    sf_id_instance: "adaptive_streaming_I1"
+    ipendpoint_id: "adaptive_streaming_I1_apache2"
+    influxdb_url: "http://192.168.50.10:8086"
+    database_name: "CLMCMetrics"
+  - name: loadtest-streaming
+    cpus: 2
+    memory: 4096
+    disk: "10GB"
+    service_name: "loadtest-streaming"
+    forward_ports:
+      - guest: 80
+        host: 8083
+    ip_address: "192.168.50.12"
+    location: "DC1"
+    sfc_id: "MS_Template_1"
+    sfc_id_instance: "MS_I1"
+    sf_id: "adaptive_streaming_client"
+    sf_id_instance: "adaptive_streaming_I1"
+    ipendpoint_id: "adaptive_streaming_I1_client1"
+    influxdb_url: "http://192.168.50.10:8086"
+    database_name: "CLMCMetrics"    
diff --git a/scripts/clmc-agent/install.sh b/scripts/clmc-agent/install.sh
new file mode 100755
index 0000000000000000000000000000000000000000..9626d91cab2f3814483c477645b8ba370df8d028
--- /dev/null
+++ b/scripts/clmc-agent/install.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+#/////////////////////////////////////////////////////////////////////////
+#//
+#// (c) University of Southampton IT Innovation Centre, 2017
+#//
+#// Copyright in this software belongs to University of Southampton
+#// IT Innovation Centre of Gamma House, Enterprise Road,
+#// Chilworth Science Park, Southampton, SO16 7NS, UK.
+#//
+#// This software may not be used, sold, licensed, transferred, copied
+#// or reproduced in whole or in part in any manner or form or in or
+#// on any media by any person other than in accordance with the terms
+#// of the Licence Agreement supplied with the software, or otherwise
+#// without the prior written consent of the copyright owners.
+#//
+#// This software is distributed WITHOUT ANY WARRANTY, without even the
+#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+#// PURPOSE, except where stated in the Licence Agreement supplied with
+#// the software.
+#//
+#//      Created By :            Michael Boniface
+#//      Created Date :          13/12/2017
+#//      Created for Project :   FLAME
+#//
+#/////////////////////////////////////////////////////////////////////////
+
+# Install telegraf
+if [ "$#" -ne 9 ]; then
+    echo "Error: illegal number of arguments: "$#
+      echo "Usage: install-clmc-agent.sh TELEGRAF_CONF_FILE LOCATION SFC_ID SFC_ID_INSTANCE SF_ID SF_ID_INSTANCE IP_ENDPOINT_ID INFLUXDB_URL DATABASE_NAME"
+      exit 
+fi
+
+TELEGRAF_CONF_FILE=$1
+LOCATION=$2
+SFC_ID=$3
+SFC_ID_INSTANCE=$4
+SF_ID=$5
+SF_ID_INSTANCE=$6
+IP_ENDPOINT_ID=$7
+INFLUXDB_URL=$8
+DATABASE_NAME=$9
+
+if [ ! -f $TELEGRAF_CONF_FILE ]; then
+    echo "Error: Telegraf conf template file not found: "$TELEGRAF_CONF_FILE
+    exit 1
+fi
+
+wget https://dl.influxdata.com/telegraf/releases/telegraf_1.3.2-1_amd64.deb
+dpkg -i telegraf_1.3.2-1_amd64.deb
+
+# Copy configuration
+echo "Telegraf config file: " $TELEGRAF_CONF_FILE
+cp $TELEGRAF_CONF_FILE /etc/telegraf/telegraf.conf
+
+echo "INFLUXDB_URL: " $INFLUXDB_URL
+echo "DATABASE_NAME: " $DATABASE_NAME
+
+# Replace template parameters
+sed -i 's/{{LOCATION}}/'$LOCATION'/g' /etc/telegraf/telegraf.conf
+sed -i 's/{{SFC_ID}}/'$SFC_ID'/g' /etc/telegraf/telegraf.conf
+sed -i 's/{{SFC_ID_INSTANCE}}/'$SFC_ID_INSTANCE'/g' /etc/telegraf/telegraf.conf
+sed -i 's/{{SF_ID}}/'$SF_ID'/g' /etc/telegraf/telegraf.conf
+sed -i 's/{{SF_ID_INSTANCE}}/'$SF_ID_INSTANCE'/g' /etc/telegraf/telegraf.conf
+sed -i 's/{{IP_ENDPOINT_ID}}/'$IP_ENDPOINT_ID'/g' /etc/telegraf/telegraf.conf
+sed -i 's|{{INFLUXDB_URL}}|'$INFLUXDB_URL'|g' /etc/telegraf/telegraf.conf
+sed -i 's/{{DATABASE_NAME}}/'$DATABASE_NAME'/g' /etc/telegraf/telegraf.conf
+
+# Start telegraf
+systemctl start telegraf
\ No newline at end of file
diff --git a/scripts/clmc-agent/telegraf_ipendpoint_template.conf b/scripts/clmc-agent/telegraf_ipendpoint_template.conf
new file mode 100644
index 0000000000000000000000000000000000000000..2358dcca5bfcd48d4b45e0e1ccd316357f1e4ba7
--- /dev/null
+++ b/scripts/clmc-agent/telegraf_ipendpoint_template.conf
@@ -0,0 +1,112 @@
+# Telegraf configuration
+
+# Telegraf is entirely plugin driven. All metrics are gathered from the
+# declared inputs, and sent to the declared outputs.
+
+# Plugins must be declared in here to be active.
+# To deactivate a plugin, comment out the name and any variables.
+
+# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
+# file would generate.
+
+# Global tags can be specified here in key="value" format.
+[global_tags]
+  # location of the data centre
+  location="{{LOCATION}}"
+  # media service template id
+  sfc="{{SFC_ID}}"
+  # media service instance
+  sfc_i="{{SFC_ID_INSTANCE}}"
+  # service function type
+  sf="{{SF_ID}}"
+  # service function instance id
+  sf_i="{{SF_ID_INSTANCE}}"
+  # ipendpoint id aka surrogate instance
+  ipendpoint="{{IP_ENDPOINT_ID}}"
+
+# Configuration for telegraf agent
+[agent]
+  ## Default data collection interval for all inputs
+  interval = "10s"
+  ## Rounds collection interval to 'interval'
+  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
+  round_interval = true
+
+  ## Telegraf will cache metric_buffer_limit metrics for each output, and will
+  ## flush this buffer on a successful write.
+  metric_buffer_limit = 1000
+  ## Flush the buffer whenever full, regardless of flush_interval.
+  flush_buffer_when_full = true
+
+  ## Collection jitter is used to jitter the collection by a random amount.
+  ## Each plugin will sleep for a random time within jitter before collecting.
+  ## This can be used to avoid many plugins querying things like sysfs at the
+  ## same time, which can have a measurable effect on the system.
+  collection_jitter = "0s"
+
+  ## Default flushing interval for all outputs. You shouldn't set this below
+  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
+  flush_interval = "10s"
+  ## Jitter the flush interval by a random amount. This is primarily to avoid
+  ## large write spikes for users running a large number of telegraf instances.
+  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
+  flush_jitter = "0s"
+
+  ## Logging configuration:
+  ## Run telegraf in debug mode
+  debug = false
+  ## Run telegraf in quiet mode
+  quiet = false
+  ## Specify the log file name. The empty string means to log to stdout.
+  logfile = "G:/Telegraf/telegraf.log"
+
+  ## Override default hostname, if empty use os.Hostname()
+  hostname = ""
+
+
+###############################################################################
+#                                  OUTPUTS                                    #
+###############################################################################
+
+# Configuration for influxdb server to send metrics to
+[[outputs.influxdb]]
+  # The full HTTP or UDP endpoint URL for your InfluxDB instance.
+  # Multiple urls can be specified but it is assumed that they are part of the same
+  # cluster, this means that only ONE of the urls will be written to each interval.
+  # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
+  urls = ["{{INFLUXDB_URL}}"] # required
+  # The target database for metrics (telegraf will create it if not exists)
+  database = "{{DATABASE_NAME}}" # required
+  # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+  # note: using second precision greatly helps InfluxDB compression
+  precision = "s"
+
+  ## Write timeout (for the InfluxDB client), formatted as a string.
+  ## If not provided, will default to 5s. 0s means no timeout (not recommended).
+  timeout = "5s"
+  # username = "telegraf"
+  # password = "metricsmetricsmetricsmetrics"
+  # Set the user agent for HTTP POSTs (can be useful for log differentiation)
+  # user_agent = "telegraf"
+  # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
+  # udp_payload = 512
+
+
+###############################################################################
+#                                  INPUTS                                     #
+###############################################################################
+# # Influx HTTP write listener
+[[inputs.http_listener]]
+  ## Address and port to host HTTP listener on
+  service_address = ":8186"
+
+  ## timeouts
+  read_timeout = "10s"
+  write_timeout = "10s"
+
+  ## HTTPS
+  #tls_cert= "/etc/telegraf/cert.pem"
+  #tls_key = "/etc/telegraf/key.pem"
+
+  ## MTLS
+  #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
diff --git a/scripts/clmc-service/install.sh b/scripts/clmc-service/install.sh
new file mode 100755
index 0000000000000000000000000000000000000000..7850c751e62ed4adccc0d5a06b3a419dd6eee79e
--- /dev/null
+++ b/scripts/clmc-service/install.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+#/////////////////////////////////////////////////////////////////////////
+#//
+#// (c) University of Southampton IT Innovation Centre, 2017
+#//
+#// Copyright in this software belongs to University of Southampton
+#// IT Innovation Centre of Gamma House, Enterprise Road,
+#// Chilworth Science Park, Southampton, SO16 7NS, UK.
+#//
+#// This software may not be used, sold, licensed, transferred, copied
+#// or reproduced in whole or in part in any manner or form or in or
+#// on any media by any person other than in accordance with the terms
+#// of the Licence Agreement supplied with the software, or otherwise
+#// without the prior written consent of the copyright owners.
+#//
+#// This software is distributed WITHOUT ANY WARRANTY, without even the
+#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+#// PURPOSE, except where stated in the Licence Agreement supplied with
+#// the software.
+#//
+#//      Created By :            Michael Boniface
+#//      Created Date :          13/12/2017
+#//      Created for Project :   FLAME
+#//
+#/////////////////////////////////////////////////////////////////////////
+
+# install python for the simulator
+apt-get update
+apt-get -y install python
+
+# install influx
+wget https://dl.influxdata.com/influxdb/releases/influxdb_1.2.4_amd64.deb
+dpkg -i influxdb_1.2.4_amd64.deb
+
+# install kapacitor
+wget https://dl.influxdata.com/kapacitor/releases/kapacitor_1.3.1_amd64.deb
+dpkg -i kapacitor_1.3.1_amd64.deb
+
+# install Chronograf
+wget https://dl.influxdata.com/chronograf/releases/chronograf_1.3.3.0_amd64.deb
+dpkg -i chronograf_1.3.3.0_amd64.deb
+
+systemctl start influxdb
+systemctl start kapacitor
+systemctl start chronograf
diff --git a/test/services/apache/install.sh b/test/services/apache/install.sh
new file mode 100755
index 0000000000000000000000000000000000000000..8ae01814bc269ae55794c5c941899229ced68ec5
--- /dev/null
+++ b/test/services/apache/install.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+#/////////////////////////////////////////////////////////////////////////
+#//
+#// (c) University of Southampton IT Innovation Centre, 2017
+#//
+#// Copyright in this software belongs to University of Southampton
+#// IT Innovation Centre of Gamma House, Enterprise Road,
+#// Chilworth Science Park, Southampton, SO16 7NS, UK.
+#//
+#// This software may not be used, sold, licensed, transferred, copied
+#// or reproduced in whole or in part in any manner or form or in or
+#// on any media by any person other than in accordance with the terms
+#// of the Licence Agreement supplied with the software, or otherwise
+#// without the prior written consent of the copyright owners.
+#//
+#// This software is distributed WITHOUT ANY WARRANTY, without even the
+#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+#// PURPOSE, except where stated in the Licence Agreement supplied with
+#// the software.
+#//
+#//      Created By :            Michael Boniface
+#//      Created Date :          23/01/2018
+#//      Created for Project :   FLAME
+#//
+#/////////////////////////////////////////////////////////////////////////
+
+# Install apache
+sudo apt-get update
+sudo apt-get -y install apache2
+
+TEST_VIDEO="20180212104221flame-project-full.mp4"
+TEST_VIDEO_ARCHIVE=$TEST_VIDEO".gz"
+DEST_DIR="/var/www/html/" 
+DEST_FILE=$DEST_DIR"/"$TEST_VIDEO_ARCHIVE
+
+# Copy files for MPEG-DASH testing
+curl "ftp://ftp.it-innovation.soton.ac.uk/testdata/video/"$TEST_VIDEO_ARCHIVE --user flame-rw:DR8ngj3ogSjd8gl -o $DEST_FILE
+tar -xvf $DEST_FILE -C /var/www/html/
+
+rm -rf $DEST_FILE
+mv $DEST_DIR"/"$TEST_VIDEO $DEST_DIR"/"test_video
+
+# start apache
+apachectl -k start
+apachectl -k restart
diff --git a/test/services/apache/telegraf_template.conf b/test/services/apache/telegraf_template.conf
new file mode 100644
index 0000000000000000000000000000000000000000..be8d9f3321fecc13ee7562d001e498bf307f8d85
--- /dev/null
+++ b/test/services/apache/telegraf_template.conf
@@ -0,0 +1,154 @@
+# Telegraf configuration
+
+# Telegraf is entirely plugin driven. All metrics are gathered from the
+# declared inputs, and sent to the declared outputs.
+
+# Plugins must be declared in here to be active.
+# To deactivate a plugin, comment out the name and any variables.
+
+# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
+# file would generate.
+
+# Global tags can be specified here in key="value" format.
+[global_tags]
+  # location of the data centre
+  location="{{LOCATION}}"
+  # media service template id
+  sfc="{{SFC_ID}}"
+  # media service instance
+  sfc_i="{{SFC_ID_INSTANCE}}"
+  # service function type
+  sf="{{SF_ID}}"
+  # service function instance id
+  sf_i="{{SF_ID_INSTANCE}}"
+  # ipendpoint id aka surrogate instance
+  ipendpoint="{{IP_ENDPOINT_ID}}"
+
+# Configuration for telegraf agent
+[agent]
+  ## Default data collection interval for all inputs
+  interval = "10s"
+  ## Rounds collection interval to 'interval'
+  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
+  round_interval = true
+
+  ## Telegraf will cache metric_buffer_limit metrics for each output, and will
+  ## flush this buffer on a successful write.
+  metric_buffer_limit = 1000
+  ## Flush the buffer whenever full, regardless of flush_interval.
+  flush_buffer_when_full = true
+
+  ## Collection jitter is used to jitter the collection by a random amount.
+  ## Each plugin will sleep for a random time within jitter before collecting.
+  ## This can be used to avoid many plugins querying things like sysfs at the
+  ## same time, which can have a measurable effect on the system.
+  collection_jitter = "0s"
+
+  ## Default flushing interval for all outputs. You shouldn't set this below
+  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
+  flush_interval = "10s"
+  ## Jitter the flush interval by a random amount. This is primarily to avoid
+  ## large write spikes for users running a large number of telegraf instances.
+  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
+  flush_jitter = "0s"
+
+  ## Logging configuration:
+  ## Run telegraf in debug mode
+  debug = false
+  ## Run telegraf in quiet mode
+  quiet = false
+  ## Specify the log file name. The empty string means to log to stdout.
+  logfile = "/var/log/telegraf/telegraf.log"
+
+  ## Override default hostname, if empty use os.Hostname()
+  hostname = ""
+
+
+###############################################################################
+#                                  OUTPUTS                                    #
+###############################################################################
+
+# Configuration for influxdb server to send metrics to
+[[outputs.influxdb]]
+  # The full HTTP or UDP endpoint URL for your InfluxDB instance.
+  # Multiple urls can be specified but it is assumed that they are part of the same
+  # cluster, this means that only ONE of the urls will be written to each interval.
+  # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
+  urls = ["{{INFLUXDB_URL}}"] # required
+  # The target database for metrics (telegraf will create it if not exists)
+  database = "{{DATABASE_NAME}}" # required
+  # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+  # note: using second precision greatly helps InfluxDB compression
+  precision = "s"
+
+  ## Write timeout (for the InfluxDB client), formatted as a string.
+  ## If not provided, will default to 5s. 0s means no timeout (not recommended).
+  timeout = "5s"
+  # username = "telegraf"
+  # password = "metricsmetricsmetricsmetrics"
+  # Set the user agent for HTTP POSTs (can be useful for log differentiation)
+  # user_agent = "telegraf"
+  # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
+  # udp_payload = 512
+
+
+###############################################################################
+#                                  INPUTS                                     #
+###############################################################################
+# # Influx HTTP write listener
+[[inputs.http_listener]]
+  ## Address and port to host HTTP listener on
+  service_address = ":8186"
+
+  ## timeouts
+  read_timeout = "10s"
+  write_timeout = "10s"
+
+  ## HTTPS
+  #tls_cert= "/etc/telegraf/cert.pem"
+  #tls_key = "/etc/telegraf/key.pem"
+
+  ## MTLS
+  #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+
+# Read Apache status information (mod_status)
+[[inputs.apache]]
+  ## An array of URLs to gather from, must be directed at the machine
+  ## readable version of the mod_status page including the auto query string.
+  ## Default is "http://localhost/server-status?auto".
+  urls = ["http://localhost/server-status?auto"]
+
+  ## Credentials for basic HTTP authentication.
+  # username = "myuser"
+  # password = "mypassword"
+
+  ## Maximum time to receive response.
+  # response_timeout = "5s"
+
+  ## Optional SSL Config
+  # ssl_ca = "/etc/telegraf/ca.pem"
+  # ssl_cert = "/etc/telegraf/cert.pem"
+  # ssl_key = "/etc/telegraf/key.pem"
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
+
+[[inputs.net_response]]
+  ## Protocol, must be "tcp" or "udp"
+  ## NOTE: because the "udp" protocol does not respond to requests, it requires
+  ## a send/expect string pair (see below).
+  protocol = "tcp"
+  ## Server address (default localhost)
+  address = "localhost:80"
+  ## Set timeout
+  timeout = "1s"
+
+  ## Set read timeout (only used if expecting a response)
+  read_timeout = "1s"
+
+  ## The following options are required for UDP checks. For TCP, they are
+  ## optional. The plugin will send the given string to the server and then
+  ## expect to receive the given 'expect' string back.
+  ## string sent to the server
+  # send = "ssh"
+  ## expected string in answer
+  # expect = "ssh"
\ No newline at end of file
diff --git a/test/services/ffmpeg/install.sh b/test/services/ffmpeg/install.sh
new file mode 100755
index 0000000000000000000000000000000000000000..02d6e1e3d7394a636b80731ac0c3d8aa5d295e1e
--- /dev/null
+++ b/test/services/ffmpeg/install.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+apt-get update
+apt-get -y install zip python
+
+wget http://zebulon.bok.net/Bento4/binaries/Bento4-SDK-1-5-1-621.x86_64-unknown-linux.zip
+unzip Bento4-SDK-1-5-1-621.x86_64-unknown-linux.zip
+
+mv Bento4-SDK-1-5-1-621.x86_64-unknown-linux /opt/ 
+rm Bento4-SDK-1-5-1-621.x86_64-unknown-linux.zip
+
+add-apt-repository -y ppa:jonathonf/ffmpeg-3
+apt-get update && apt-get -y install ffmpeg libav-tools x264 x265
+
+ffmpeg -version
diff --git a/test/services/ffmpeg/telegraf_template.conf b/test/services/ffmpeg/telegraf_template.conf
new file mode 100644
index 0000000000000000000000000000000000000000..3e30465f39ca7035ec8217909c9bc5d29942fa4e
--- /dev/null
+++ b/test/services/ffmpeg/telegraf_template.conf
@@ -0,0 +1,112 @@
+# Telegraf configuration
+
+# Telegraf is entirely plugin driven. All metrics are gathered from the
+# declared inputs, and sent to the declared outputs.
+
+# Plugins must be declared in here to be active.
+# To deactivate a plugin, comment out the name and any variables.
+
+# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
+# file would generate.
+
+# Global tags can be specified here in key="value" format.
+[global_tags]
+  # location of the data centre
+  location="{{LOCATION}}"
+  # media service template id
+  sfc="{{SFC_ID}}"
+  # media service instance
+  sfc_i="{{SFC_ID_INSTANCE}}"
+  # service function type
+  sf="{{SF_ID}}"
+  # service function instance id
+  sf_i="{{SF_ID_INSTANCE}}"
+  # ipendpoint id aka surrogate instance
+  ipendpoint="{{IP_ENDPOINT_ID}}"
+
+# Configuration for telegraf agent
+[agent]
+  ## Default data collection interval for all inputs
+  interval = "10s"
+  ## Rounds collection interval to 'interval'
+  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
+  round_interval = true
+
+  ## Telegraf will cache metric_buffer_limit metrics for each output, and will
+  ## flush this buffer on a successful write.
+  metric_buffer_limit = 1000
+  ## Flush the buffer whenever full, regardless of flush_interval.
+  flush_buffer_when_full = true
+
+  ## Collection jitter is used to jitter the collection by a random amount.
+  ## Each plugin will sleep for a random time within jitter before collecting.
+  ## This can be used to avoid many plugins querying things like sysfs at the
+  ## same time, which can have a measurable effect on the system.
+  collection_jitter = "0s"
+
+  ## Default flushing interval for all outputs. You shouldn't set this below
+  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
+  flush_interval = "10s"
+  ## Jitter the flush interval by a random amount. This is primarily to avoid
+  ## large write spikes for users running a large number of telegraf instances.
+  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
+  flush_jitter = "0s"
+
+  ## Logging configuration:
+  ## Run telegraf in debug mode
+  debug = false
+  ## Run telegraf in quiet mode
+  quiet = false
+  ## Specify the log file name. The empty string means to log to stdout.
+  logfile = "/var/log/telegraf/telegraf.log"
+
+  ## Override default hostname, if empty use os.Hostname()
+  hostname = ""
+
+
+###############################################################################
+#                                  OUTPUTS                                    #
+###############################################################################
+
+# Configuration for influxdb server to send metrics to
+[[outputs.influxdb]]
+  # The full HTTP or UDP endpoint URL for your InfluxDB instance.
+  # Multiple urls can be specified but it is assumed that they are part of the same
+  # cluster, this means that only ONE of the urls will be written to each interval.
+  # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
+  urls = ["{{INFLUXDB_URL}}"] # required
+  # The target database for metrics (telegraf will create it if not exists)
+  database = "{{DATABASE_NAME}}" # required
+  # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+  # note: using second precision greatly helps InfluxDB compression
+  precision = "s"
+
+  ## Write timeout (for the InfluxDB client), formatted as a string.
+  ## If not provided, will default to 5s. 0s means no timeout (not recommended).
+  timeout = "5s"
+  # username = "telegraf"
+  # password = "metricsmetricsmetricsmetrics"
+  # Set the user agent for HTTP POSTs (can be useful for log differentiation)
+  # user_agent = "telegraf"
+  # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
+  # udp_payload = 512
+
+
+###############################################################################
+#                                  INPUTS                                     #
+###############################################################################
+# # Influx HTTP write listener
+[[inputs.http_listener]]
+  ## Address and port to host HTTP listener on
+  service_address = ":8186"
+
+  ## timeouts
+  read_timeout = "10s"
+  write_timeout = "10s"
+
+  ## HTTPS
+  #tls_cert= "/etc/telegraf/cert.pem"
+  #tls_key = "/etc/telegraf/key.pem"
+
+  ## MTLS
+  #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
\ No newline at end of file
diff --git a/test/services/ffmpeg/transcode.sh b/test/services/ffmpeg/transcode.sh
new file mode 100755
index 0000000000000000000000000000000000000000..877472b2be6f55f5ec8a60d9f2b3e41ba57ae301
--- /dev/null
+++ b/test/services/ffmpeg/transcode.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+#
+# Bento4 must be manually obtained from https://www.bento4.com/downloads/ ffmpeg can be installed as follows 
+# (Debian - http://www.deb-multimedia.org/) ~$ 
+# sudo echo deb http://www.deb-multimedia.org jessie main non-free >> /etc/apt/sources.list
+# ~$ sudo apt update ~$ sudo apt install deb-multimedia-keyring ~$ sudo apt update && sudo apt-get dist-upgrade
+#
+# First encode the video to 24fps!!! and MP4 (h.264)
+#
+# Video MP4 file
+INPUT=$1
+OUTPUT_iFRAMES="$1-iFrames.mp4"
+OUTPUT_FRAGMENTED="$OUTPUT_iFRAMES-Fragmented.mp4"
+
+OUTPUT_FOLDER_NAME=$(date +%Y%m%d%H%M%S)$1
+echo "OUTPUT_FOLDER_NAME: "$OUTPUT_FOLDER_NAME
+
+rm -rf $OUTPUT_FOLDER_NAME
+mkdir $OUTPUT_FOLDER_NAME
+
+# Insert Correct number of I frames
+#ffmpeg -y -i $INPUT -profile:v baseline -level 3.0 -c:a libfdk_aac -ac 2 -ab 128k -c:v libx264 -x264opts 'keyint=24:min-keyint=24:no-scenecut' -b:v 400k -maxrate 400k -bufsize 1000k -vf "scale=-1:360" $OUTPUT_iFRAMES
+
+ffmpeg -y -i $INPUT -profile:v baseline -level 3.0 -c:a aac -ac 2 -ab 128k -c:v libx264 -x264opts 'keyint=24:min-keyint=24:no-scenecut' -b:v 400k -maxrate 400k -bufsize 1000k -vf "scale=-1:360" -strict experimental $OUTPUT_FOLDER_NAME"/"$OUTPUT_iFRAMES
+
+# fragment MP4
+/opt/Bento4-SDK-1-5-1-621.x86_64-unknown-linux/bin/mp4fragment --timescale 1000 $OUTPUT_FOLDER_NAME"/"$OUTPUT_iFRAMES $OUTPUT_FOLDER_NAME"/"$OUTPUT_FRAGMENTED
+
+# Option 1 with Bento4
+/opt/Bento4-SDK-1-5-1-621.x86_64-unknown-linux/bin/mp4dash --mpd-name=stream.mpd --use-segment-list --use-compat-namespace -o $OUTPUT_FOLDER_NAME"/"$OUTPUT_FOLDER_NAME $OUTPUT_FOLDER_NAME"/"$OUTPUT_FRAGMENTED 
+
+cd "$OUTPUT_FOLDER_NAME" || exit 1
+tar -cvzf "$OUTPUT_FOLDER_NAME.gz" "$OUTPUT_FOLDER_NAME"
+
diff --git a/test/services/ipendpoint/install.sh b/test/services/ipendpoint/install.sh
new file mode 100755
index 0000000000000000000000000000000000000000..7cc5c340a91da881e4ea51765438c2d02346c100
--- /dev/null
+++ b/test/services/ipendpoint/install.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+#/////////////////////////////////////////////////////////////////////////
+#//
+#// (c) University of Southampton IT Innovation Centre, 2017
+#//
+#// Copyright in this software belongs to University of Southampton
+#// IT Innovation Centre of Gamma House, Enterprise Road,
+#// Chilworth Science Park, Southampton, SO16 7NS, UK.
+#//
+#// This software may not be used, sold, licensed, transferred, copied
+#// or reproduced in whole or in part in any manner or form or in or
+#// on any media by any person other than in accordance with the terms
+#// of the Licence Agreement supplied with the software, or otherwise
+#// without the prior written consent of the copyright owners.
+#//
+#// This software is distributed WITHOUT ANY WARRANTY, without even the
+#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+#// PURPOSE, except where stated in the Licence Agreement supplied with
+#// the software.
+#//
+#//      Created By :            Michael Boniface
+#//      Created Date :          23/01/2018
+#//      Created for Project :   FLAME
+#//
+#/////////////////////////////////////////////////////////////////////////
+
+# Install ipendpoint
+# This is a dummy script as the endpoint is driven by simulation
\ No newline at end of file
diff --git a/test/services/ipendpoint/telegraf_template.conf b/test/services/ipendpoint/telegraf_template.conf
new file mode 100644
index 0000000000000000000000000000000000000000..3e30465f39ca7035ec8217909c9bc5d29942fa4e
--- /dev/null
+++ b/test/services/ipendpoint/telegraf_template.conf
@@ -0,0 +1,112 @@
+# Telegraf configuration
+
+# Telegraf is entirely plugin driven. All metrics are gathered from the
+# declared inputs, and sent to the declared outputs.
+
+# Plugins must be declared in here to be active.
+# To deactivate a plugin, comment out the name and any variables.
+
+# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
+# file would generate.
+
+# Global tags can be specified here in key="value" format.
+[global_tags]
+  # location of the data centre
+  location="{{LOCATION}}"
+  # media service template id
+  sfc="{{SFC_ID}}"
+  # media service instance
+  sfc_i="{{SFC_ID_INSTANCE}}"
+  # service function type
+  sf="{{SF_ID}}"
+  # service function instance id
+  sf_i="{{SF_ID_INSTANCE}}"
+  # ipendpoint id aka surrogate instance
+  ipendpoint="{{IP_ENDPOINT_ID}}"
+
+# Configuration for telegraf agent
+[agent]
+  ## Default data collection interval for all inputs
+  interval = "10s"
+  ## Rounds collection interval to 'interval'
+  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
+  round_interval = true
+
+  ## Telegraf will cache metric_buffer_limit metrics for each output, and will
+  ## flush this buffer on a successful write.
+  metric_buffer_limit = 1000
+  ## Flush the buffer whenever full, regardless of flush_interval.
+  flush_buffer_when_full = true
+
+  ## Collection jitter is used to jitter the collection by a random amount.
+  ## Each plugin will sleep for a random time within jitter before collecting.
+  ## This can be used to avoid many plugins querying things like sysfs at the
+  ## same time, which can have a measurable effect on the system.
+  collection_jitter = "0s"
+
+  ## Default flushing interval for all outputs. You shouldn't set this below
+  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
+  flush_interval = "10s"
+  ## Jitter the flush interval by a random amount. This is primarily to avoid
+  ## large write spikes for users running a large number of telegraf instances.
+  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
+  flush_jitter = "0s"
+
+  ## Logging configuration:
+  ## Run telegraf in debug mode
+  debug = false
+  ## Run telegraf in quiet mode
+  quiet = false
+  ## Specify the log file name. The empty string means to log to stdout.
+  logfile = "/var/log/telegraf/telegraf.log"
+
+  ## Override default hostname, if empty use os.Hostname()
+  hostname = ""
+
+
+###############################################################################
+#                                  OUTPUTS                                    #
+###############################################################################
+
+# Configuration for influxdb server to send metrics to
+[[outputs.influxdb]]
+  # The full HTTP or UDP endpoint URL for your InfluxDB instance.
+  # Multiple urls can be specified but it is assumed that they are part of the same
+  # cluster, this means that only ONE of the urls will be written to each interval.
+  # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
+  urls = ["{{INFLUXDB_URL}}"] # required
+  # The target database for metrics (telegraf will create it if not exists)
+  database = "{{DATABASE_NAME}}" # required
+  # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+  # note: using second precision greatly helps InfluxDB compression
+  precision = "s"
+
+  ## Write timeout (for the InfluxDB client), formatted as a string.
+  ## If not provided, will default to 5s. 0s means no timeout (not recommended).
+  timeout = "5s"
+  # username = "telegraf"
+  # password = "metricsmetricsmetricsmetrics"
+  # Set the user agent for HTTP POSTs (can be useful for log differentiation)
+  # user_agent = "telegraf"
+  # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
+  # udp_payload = 512
+
+
+###############################################################################
+#                                  INPUTS                                     #
+###############################################################################
+# # Influx HTTP write listener
+[[inputs.http_listener]]
+  ## Address and port to host HTTP listener on
+  service_address = ":8186"
+
+  ## timeouts
+  read_timeout = "10s"
+  write_timeout = "10s"
+
+  ## HTTPS
+  #tls_cert= "/etc/telegraf/cert.pem"
+  #tls_key = "/etc/telegraf/key.pem"
+
+  ## MTLS
+  #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
\ No newline at end of file
diff --git a/test/services/jmeter/install.sh b/test/services/jmeter/install.sh
new file mode 100644
index 0000000000000000000000000000000000000000..937af590612b872f9be174ae58e5f9b6211f3fe6
--- /dev/null
+++ b/test/services/jmeter/install.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+#/////////////////////////////////////////////////////////////////////////
+#//
+#// (c) University of Southampton IT Innovation Centre, 2017
+#//
+#// Copyright in this software belongs to University of Southampton
+#// IT Innovation Centre of Gamma House, Enterprise Road,
+#// Chilworth Science Park, Southampton, SO16 7NS, UK.
+#//
+#// This software may not be used, sold, licensed, transferred, copied
+#// or reproduced in whole or in part in any manner or form or in or
+#// on any media by any person other than in accordance with the terms
+#// of the Licence Agreement supplied with the software, or otherwise
+#// without the prior written consent of the copyright owners.
+#//
+#// This software is distributed WITHOUT ANY WARRANTY, without even the
+#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+#// PURPOSE, except where stated in the Licence Agreement supplied with
+#// the software.
+#//
+#//      Created By :            Michael Boniface
+#//      Created Date :          12/12/2017
+#//      Created for Project :   FLAME
+#//
+#/////////////////////////////////////////////////////////////////////////
+
+# install java 8
+add-apt-repository -y ppa:webupd8team/java
+apt-get -y update
+
+# tell the oracle java installer the license has been read
+echo debconf shared/accepted-oracle-license-v1-1 select true | debconf-set-selections
+echo debconf shared/accepted-oracle-license-v1-1 seen true | debconf-set-selections
+DEBIAN_FRONTEND=noninteractive apt-get install -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" oracle-java8-installer
+
+# install JMeter
+JMETER_VERSION="apache-jmeter-4.0"
+wget http://mirror.vorboss.net/apache//jmeter/binaries/$JMETER_VERSION.tgz -P /tmp
+tar -C /opt -xf /tmp/$JMETER_VERSION.tgz
+rm -rf /tmp/$JMETER_VERSION.tgz
+
+# Add jmeter to the path system wide
+#echo 'PATH="'$PATH':/opt/'$JMETER_VERSION'/bin"' > /etc/environment
+
+echo 'PATH="'$PATH':/opt/'$JMETER_VERSION'/bin"' > /etc/environment
+source /etc/environment
diff --git a/test/services/loadtest-streaming/install.sh b/test/services/loadtest-streaming/install.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8c1f2fb318ec9d627034187441fe17d03d0f5edd
--- /dev/null
+++ b/test/services/loadtest-streaming/install.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+#/////////////////////////////////////////////////////////////////////////
+#//
+#// (c) University of Southampton IT Innovation Centre, 2017
+#//
+#// Copyright in this software belongs to University of Southampton
+#// IT Innovation Centre of Gamma House, Enterprise Road,
+#// Chilworth Science Park, Southampton, SO16 7NS, UK.
+#//
+#// This software may not be used, sold, licensed, transferred, copied
+#// or reproduced in whole or in part in any manner or form or in or
+#// on any media by any person other than in accordance with the terms
+#// of the Licence Agreement supplied with the software, or otherwise
+#// without the prior written consent of the copyright owners.
+#//
+#// This software is distributed WITHOUT ANY WARRANTY, without even the
+#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+#// PURPOSE, except where stated in the Licence Agreement supplied with
+#// the software.
+#//
+#//      Created By :            Michael Boniface
+#//      Created Date :          14/02/2018
+#//      Created for Project :   FLAME
+#//
+#/////////////////////////////////////////////////////////////////////////
+
+set -euo pipefail
+
+echo "REPO_ROOT: $REPO_ROOT"
+
+"$REPO_ROOT/test/services/vlc/install.sh"
+"$REPO_ROOT/test/services/jmeter/install.sh"
diff --git a/test/services/loadtest-streaming/telegraf_template.conf b/test/services/loadtest-streaming/telegraf_template.conf
new file mode 100644
index 0000000000000000000000000000000000000000..3e30465f39ca7035ec8217909c9bc5d29942fa4e
--- /dev/null
+++ b/test/services/loadtest-streaming/telegraf_template.conf
@@ -0,0 +1,112 @@
+# Telegraf configuration
+
+# Telegraf is entirely plugin driven. All metrics are gathered from the
+# declared inputs, and sent to the declared outputs.
+
+# Plugins must be declared in here to be active.
+# To deactivate a plugin, comment out the name and any variables.
+
+# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
+# file would generate.
+
+# Global tags can be specified here in key="value" format.
+[global_tags]
+  # location of the data centre
+  location="{{LOCATION}}"
+  # media service template id
+  sfc="{{SFC_ID}}"
+  # media service instance
+  sfc_i="{{SFC_ID_INSTANCE}}"
+  # service function type
+  sf="{{SF_ID}}"
+  # service function instance id
+  sf_i="{{SF_ID_INSTANCE}}"
+  # ipendpoint id aka surrogate instance
+  ipendpoint="{{IP_ENDPOINT_ID}}"
+
+# Configuration for telegraf agent
+[agent]
+  ## Default data collection interval for all inputs
+  interval = "10s"
+  ## Rounds collection interval to 'interval'
+  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
+  round_interval = true
+
+  ## Telegraf will cache metric_buffer_limit metrics for each output, and will
+  ## flush this buffer on a successful write.
+  metric_buffer_limit = 1000
+  ## Flush the buffer whenever full, regardless of flush_interval.
+  flush_buffer_when_full = true
+
+  ## Collection jitter is used to jitter the collection by a random amount.
+  ## Each plugin will sleep for a random time within jitter before collecting.
+  ## This can be used to avoid many plugins querying things like sysfs at the
+  ## same time, which can have a measurable effect on the system.
+  collection_jitter = "0s"
+
+  ## Default flushing interval for all outputs. You shouldn't set this below
+  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
+  flush_interval = "10s"
+  ## Jitter the flush interval by a random amount. This is primarily to avoid
+  ## large write spikes for users running a large number of telegraf instances.
+  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
+  flush_jitter = "0s"
+
+  ## Logging configuration:
+  ## Run telegraf in debug mode
+  debug = false
+  ## Run telegraf in quiet mode
+  quiet = false
+  ## Specify the log file name. The empty string means to log to stdout.
+  logfile = "/var/log/telegraf/telegraf.log"
+
+  ## Override default hostname, if empty use os.Hostname()
+  hostname = ""
+
+
+###############################################################################
+#                                  OUTPUTS                                    #
+###############################################################################
+
+# Configuration for influxdb server to send metrics to
+[[outputs.influxdb]]
+  # The full HTTP or UDP endpoint URL for your InfluxDB instance.
+  # Multiple urls can be specified but it is assumed that they are part of the same
+  # cluster, this means that only ONE of the urls will be written to each interval.
+  # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
+  urls = ["{{INFLUXDB_URL}}"] # required
+  # The target database for metrics (telegraf will create it if not exists)
+  database = "{{DATABASE_NAME}}" # required
+  # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+  # note: using second precision greatly helps InfluxDB compression
+  precision = "s"
+
+  ## Write timeout (for the InfluxDB client), formatted as a string.
+  ## If not provided, will default to 5s. 0s means no timeout (not recommended).
+  timeout = "5s"
+  # username = "telegraf"
+  # password = "metricsmetricsmetricsmetrics"
+  # Set the user agent for HTTP POSTs (can be useful for log differentiation)
+  # user_agent = "telegraf"
+  # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
+  # udp_payload = 512
+
+
+###############################################################################
+#                                  INPUTS                                     #
+###############################################################################
+# # Influx HTTP write listener
+[[inputs.http_listener]]
+  ## Address and port to host HTTP listener on
+  service_address = ":8186"
+
+  ## timeouts
+  read_timeout = "10s"
+  write_timeout = "10s"
+
+  ## HTTPS
+  #tls_cert= "/etc/telegraf/cert.pem"
+  #tls_key = "/etc/telegraf/key.pem"
+
+  ## MTLS
+  #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
\ No newline at end of file
diff --git a/test/services/mongo/install.sh b/test/services/mongo/install.sh
new file mode 100755
index 0000000000000000000000000000000000000000..e00502fe6746d41101372afe1f88a7ffcec11a1a
--- /dev/null
+++ b/test/services/mongo/install.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+#/////////////////////////////////////////////////////////////////////////
+#//
+#// (c) University of Southampton IT Innovation Centre, 2017
+#//
+#// Copyright in this software belongs to University of Southampton
+#// IT Innovation Centre of Gamma House, Enterprise Road,
+#// Chilworth Science Park, Southampton, SO16 7NS, UK.
+#//
+#// This software may not be used, sold, licensed, transferred, copied
+#// or reproduced in whole or in part in any manner or form or in or
+#// on any media by any person other than in accordance with the terms
+#// of the Licence Agreement supplied with the software, or otherwise
+#// without the prior written consent of the copyright owners.
+#//
+#// This software is distributed WITHOUT ANY WARRANTY, without even the
+#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+#// PURPOSE, except where stated in the Licence Agreement supplied with
+#// the software.
+#//
+#//      Created By :            Michael Boniface
+#//      Created Date :          23/01/2018
+#//      Created for Project :   FLAME
+#//
+#/////////////////////////////////////////////////////////////////////////
+
+# Install MongoDB
+sudo apt-get update
+sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 2930ADAE8CAF5059EE73BB4B58712A2291FA4AD5
+echo "deb [ arch=amd64,arm64 ] https://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.6 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.6.list
+sudo apt-get update
+sudo apt-get install -y mongodb-org
+sudo service mongod start
\ No newline at end of file
diff --git a/test/services/mongo/telegraf_template.conf b/test/services/mongo/telegraf_template.conf
new file mode 100644
index 0000000000000000000000000000000000000000..f28b2343cc2cd350dac6ac5ffc6e7a04b5d3a7b2
--- /dev/null
+++ b/test/services/mongo/telegraf_template.conf
@@ -0,0 +1,128 @@
+# Telegraf configuration
+
+# Telegraf is entirely plugin driven. All metrics are gathered from the
+# declared inputs, and sent to the declared outputs.
+
+# Plugins must be declared in here to be active.
+# To deactivate a plugin, comment out the name and any variables.
+
+# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
+# file would generate.
+
+# Global tags can be specified here in key="value" format.
+[global_tags]
+  # location of the data centre
+  location="{{LOCATION}}"
+  # media service template id
+  sfc="{{SFC_ID}}"
+  # media service instance
+  sfc_i="{{SFC_ID_INSTANCE}}"
+  # service function type
+  sf="{{SF_ID}}"
+  # service function instance id
+  sf_i="{{SF_ID_INSTANCE}}"
+  # ipendpoint id aka surrogate instance
+  ipendpoint="{{IP_ENDPOINT_ID}}"
+
+# Configuration for telegraf agent
+[agent]
+  ## Default data collection interval for all inputs
+  interval = "10s"
+  ## Rounds collection interval to 'interval'
+  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
+  round_interval = true
+
+  ## Telegraf will cache metric_buffer_limit metrics for each output, and will
+  ## flush this buffer on a successful write.
+  metric_buffer_limit = 1000
+  ## Flush the buffer whenever full, regardless of flush_interval.
+  flush_buffer_when_full = true
+
+  ## Collection jitter is used to jitter the collection by a random amount.
+  ## Each plugin will sleep for a random time within jitter before collecting.
+  ## This can be used to avoid many plugins querying things like sysfs at the
+  ## same time, which can have a measurable effect on the system.
+  collection_jitter = "0s"
+
+  ## Default flushing interval for all outputs. You shouldn't set this below
+  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
+  flush_interval = "10s"
+  ## Jitter the flush interval by a random amount. This is primarily to avoid
+  ## large write spikes for users running a large number of telegraf instances.
+  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
+  flush_jitter = "0s"
+
+  ## Logging configuration:
+  ## Run telegraf in debug mode
+  debug = false
+  ## Run telegraf in quiet mode
+  quiet = false
+  ## Specify the log file name. The empty string means to log to stdout.
+  logfile = "/var/log/telegraf/telegraf.log"
+
+  ## Override default hostname, if empty use os.Hostname()
+  hostname = ""
+
+
+###############################################################################
+#                                  OUTPUTS                                    #
+###############################################################################
+
+# Configuration for influxdb server to send metrics to
+[[outputs.influxdb]]
+  # The full HTTP or UDP endpoint URL for your InfluxDB instance.
+  # Multiple urls can be specified but it is assumed that they are part of the same
+  # cluster, this means that only ONE of the urls will be written to each interval.
+  # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
+  urls = ["{{INFLUXDB_URL}}"] # required
+  # The target database for metrics (telegraf will create it if not exists)
+  database = "{{DATABASE_NAME}}" # required
+  # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+  # note: using second precision greatly helps InfluxDB compression
+  precision = "s"
+
+  ## Write timeout (for the InfluxDB client), formatted as a string.
+  ## If not provided, will default to 5s. 0s means no timeout (not recommended).
+  timeout = "5s"
+  # username = "telegraf"
+  # password = "metricsmetricsmetricsmetrics"
+  # Set the user agent for HTTP POSTs (can be useful for log differentiation)
+  # user_agent = "telegraf"
+  # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
+  # udp_payload = 512
+
+
+###############################################################################
+#                                  INPUTS                                     #
+###############################################################################
+# # Influx HTTP write listener
+[[inputs.http_listener]]
+  ## Address and port to host HTTP listener on
+  service_address = ":8186"
+
+  ## timeouts
+  read_timeout = "10s"
+  write_timeout = "10s"
+
+  ## HTTPS
+  #tls_cert= "/etc/telegraf/cert.pem"
+  #tls_key = "/etc/telegraf/key.pem"
+
+  ## MTLS
+  #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+
+[[inputs.mongodb]]
+  ## An array of URLs of the form:
+  ##   "mongodb://" [user ":" pass "@"] host [ ":" port]
+  ## For example:
+  ##   mongodb://user:auth_key@10.10.3.30:27017,
+  ##   mongodb://10.10.3.33:18832,
+  servers = ["mongodb://127.0.0.1:27017"]
+  gather_perdb_stats = false
+
+  ## Optional SSL Config
+  # ssl_ca = "/etc/telegraf/ca.pem"
+  # ssl_cert = "/etc/telegraf/cert.pem"
+  # ssl_key = "/etc/telegraf/key.pem"
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
\ No newline at end of file
diff --git a/test/services/nginx/install.sh b/test/services/nginx/install.sh
new file mode 100755
index 0000000000000000000000000000000000000000..318996f4ad6e3804bf5a7cf51febe59586a1af8a
--- /dev/null
+++ b/test/services/nginx/install.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+#/////////////////////////////////////////////////////////////////////////
+#//
+#// (c) University of Southampton IT Innovation Centre, 2017
+#//
+#// Copyright in this software belongs to University of Southampton
+#// IT Innovation Centre of Gamma House, Enterprise Road,
+#// Chilworth Science Park, Southampton, SO16 7NS, UK.
+#//
+#// This software may not be used, sold, licensed, transferred, copied
+#// or reproduced in whole or in part in any manner or form or in or
+#// on any media by any person other than in accordance with the terms
+#// of the Licence Agreement supplied with the software, or otherwise
+#// without the prior written consent of the copyright owners.
+#//
+#// This software is distributed WITHOUT ANY WARRANTY, without even the
+#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+#// PURPOSE, except where stated in the Licence Agreement supplied with
+#// the software.
+#//
+#//      Created By :            Michael Boniface
+#//      Created Date :          01/02/2018
+#//      Created for Project :   FLAME
+#//
+#/////////////////////////////////////////////////////////////////////////
+
+# Install nginx
+sudo apt-get update
+yes Y | sudo apt-get install nginx 
+
+# Need to set up basic stats as this not configured by default
+# http://nginx.org/en/docs/http/ngx_http_stub_status_module.html
+echo "Copying new config file into nginx"
+cd /vagrant
+sudo rm ../etc/nginx/nginx.conf
+sudo cp test/services/nginx/nginx.conf ../etc/nginx/nginx.conf
+#cat test/services/nginx/nginx.conf > ../etc/nginx/nginx.conf
+
+
+sudo nginx -s reload
+
+# start NGINX
+sudo systemctl start nginx
+# --with-http_stub_status_module
+#nginx -V 2>&1 | grep -o with-http_stub_status_module
+#/vagrant/../etc/nginx/nginx.config
+
+# systemctl status telegraf -l
+
+
+
+# dpkg -l | grep -c nginx 
\ No newline at end of file
diff --git a/test/services/nginx/nginx.conf b/test/services/nginx/nginx.conf
new file mode 100644
index 0000000000000000000000000000000000000000..1c906bec4096ecd77a0eb8d46c091288841b92d5
--- /dev/null
+++ b/test/services/nginx/nginx.conf
@@ -0,0 +1,14 @@
+
+
+events {
+  worker_connections  4096;  ## Default: 1024
+}
+http {
+	server {
+		location /nginx_status {
+		  stub_status on;
+		  access_log   off;
+		  allow all;
+	  }
+	}
+}
\ No newline at end of file
diff --git a/test/services/nginx/telegraf_template.conf b/test/services/nginx/telegraf_template.conf
new file mode 100644
index 0000000000000000000000000000000000000000..afd25312a419bf358bb8a4dfc57c1193245182ee
--- /dev/null
+++ b/test/services/nginx/telegraf_template.conf
@@ -0,0 +1,112 @@
+# Telegraf configuration
+
+# Telegraf is entirely plugin driven. All metrics are gathered from the
+# declared inputs, and sent to the declared outputs.
+
+# Plugins must be declared in here to be active.
+# To deactivate a plugin, comment out the name and any variables.
+
+# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
+# file would generate.
+
+# Global tags can be specified here in key="value" format.
+[global_tags]
+  # location of the data centre
+  location="{{LOCATION}}"
+  # media service template id
+  sfc="{{SFC_ID}}"
+  # media service instance
+  sfc_i="{{SFC_ID_INSTANCE}}"
+  # service function type
+  sf="{{SF_ID}}"
+  # service function instance id
+  sf_i="{{SF_ID_INSTANCE}}"
+  # ipendpoint id aka surrogate instance
+  ipendpoint="{{IP_ENDPOINT_ID}}"
+
+# Configuration for telegraf agent
+[agent]
+  ## Default data collection interval for all inputs
+  interval = "10s"
+  ## Rounds collection interval to 'interval'
+  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
+  round_interval = true
+
+  ## Telegraf will cache metric_buffer_limit metrics for each output, and will
+  ## flush this buffer on a successful write.
+  metric_buffer_limit = 1000
+  ## Flush the buffer whenever full, regardless of flush_interval.
+  flush_buffer_when_full = true
+
+  ## Collection jitter is used to jitter the collection by a random amount.
+  ## Each plugin will sleep for a random time within jitter before collecting.
+  ## This can be used to avoid many plugins querying things like sysfs at the
+  ## same time, which can have a measurable effect on the system.
+  collection_jitter = "0s"
+
+  ## Default flushing interval for all outputs. You shouldn't set this below
+  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
+  flush_interval = "10s"
+  ## Jitter the flush interval by a random amount. This is primarily to avoid
+  ## large write spikes for users running a large number of telegraf instances.
+  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
+  flush_jitter = "0s"
+
+  ## Logging configuration:
+  ## Run telegraf in debug mode
+  debug = false
+  ## Run telegraf in quiet mode
+  quiet = false
+  ## Specify the log file name. The empty string means to log to stdout.
+  logfile = "/etc/telegraf/telegraf.log"
+
+  ## Override default hostname, if empty use os.Hostname()
+  hostname = ""
+
+
+###############################################################################
+#                                  OUTPUTS                                    #
+###############################################################################
+
+# Configuration for influxdb server to send metrics to
+[[outputs.influxdb]]
+  # The full HTTP or UDP endpoint URL for your InfluxDB instance.
+  # Multiple urls can be specified but it is assumed that they are part of the same
+  # cluster, this means that only ONE of the urls will be written to each interval.
+  # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
+  urls = ["{{INFLUXDB_URL}}"] # required
+  # The target database for metrics (telegraf will create it if not exists)
+  database = "{{DATABASE_NAME}}" # required
+  # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+  # note: using second precision greatly helps InfluxDB compression
+  precision = "s"
+
+  ## Write timeout (for the InfluxDB client), formatted as a string.
+  ## If not provided, will default to 5s. 0s means no timeout (not recommended).
+  timeout = "5s"
+  # username = "telegraf"
+  # password = "metricsmetricsmetricsmetrics"
+  # Set the user agent for HTTP POSTs (can be useful for log differentiation)
+  # user_agent = "telegraf"
+  # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
+  # udp_payload = 512
+
+###############################################################################
+#                                  INPUTS                                     #
+###############################################################################
+# # Influx HTTP write listener
+[[inputs.http_listener]]
+  ## Address and port to host HTTP listener on
+  service_address = ":8186"
+
+  ## timeouts
+  read_timeout = "10s"
+  write_timeout = "10s"
+
+# Read Nginx's basic status information (ngx_http_stub_status_module)
+[[inputs.nginx]]
+  ## An array of Nginx stub_status URI to gather stats.
+  urls = ["http://localhost:80/nginx_status"]
+
+  ## HTTP response timeout (default: 5s)
+#  response_timeout = "5s"
\ No newline at end of file
diff --git a/test/services/vlc/install.sh b/test/services/vlc/install.sh
new file mode 100644
index 0000000000000000000000000000000000000000..02e3b8fe01696913d8e5b65967a152bfb485eee3
--- /dev/null
+++ b/test/services/vlc/install.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+#/////////////////////////////////////////////////////////////////////////
+#//
+#// (c) University of Southampton IT Innovation Centre, 2017
+#//
+#// Copyright in this software belongs to University of Southampton
+#// IT Innovation Centre of Gamma House, Enterprise Road,
+#// Chilworth Science Park, Southampton, SO16 7NS, UK.
+#//
+#// This software may not be used, sold, licensed, transferred, copied
+#// or reproduced in whole or in part in any manner or form or in or
+#// on any media by any person other than in accordance with the terms
+#// of the Licence Agreement supplied with the software, or otherwise
+#// without the prior written consent of the copyright owners.
+#//
+#// This software is distributed WITHOUT ANY WARRANTY, without even the
+#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+#// PURPOSE, except where stated in the Licence Agreement supplied with
+#// the software.
+#//
+#//      Created By :            Michael Boniface
+#//      Created Date :          12/02/2018
+#//      Created for Project :   FLAME
+#//
+#/////////////////////////////////////////////////////////////////////////
+
+add-apt-repository -y ppa:videolan/master-daily
+apt-get update
+apt-get -y install vlc
\ No newline at end of file
diff --git a/test/services/vlc/telegraf_template.conf b/test/services/vlc/telegraf_template.conf
new file mode 100644
index 0000000000000000000000000000000000000000..3e30465f39ca7035ec8217909c9bc5d29942fa4e
--- /dev/null
+++ b/test/services/vlc/telegraf_template.conf
@@ -0,0 +1,112 @@
+# Telegraf configuration
+
+# Telegraf is entirely plugin driven. All metrics are gathered from the
+# declared inputs, and sent to the declared outputs.
+
+# Plugins must be declared in here to be active.
+# To deactivate a plugin, comment out the name and any variables.
+
+# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
+# file would generate.
+
+# Global tags can be specified here in key="value" format.
+[global_tags]
+  # location of the data centre
+  location="{{LOCATION}}"
+  # media service template id
+  sfc="{{SFC_ID}}"
+  # media service instance
+  sfc_i="{{SFC_ID_INSTANCE}}"
+  # service function type
+  sf="{{SF_ID}}"
+  # service function instance id
+  sf_i="{{SF_ID_INSTANCE}}"
+  # ipendpoint id aka surrogate instance
+  ipendpoint="{{IP_ENDPOINT_ID}}"
+
+# Configuration for telegraf agent
+[agent]
+  ## Default data collection interval for all inputs
+  interval = "10s"
+  ## Rounds collection interval to 'interval'
+  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
+  round_interval = true
+
+  ## Telegraf will cache metric_buffer_limit metrics for each output, and will
+  ## flush this buffer on a successful write.
+  metric_buffer_limit = 1000
+  ## Flush the buffer whenever full, regardless of flush_interval.
+  flush_buffer_when_full = true
+
+  ## Collection jitter is used to jitter the collection by a random amount.
+  ## Each plugin will sleep for a random time within jitter before collecting.
+  ## This can be used to avoid many plugins querying things like sysfs at the
+  ## same time, which can have a measurable effect on the system.
+  collection_jitter = "0s"
+
+  ## Default flushing interval for all outputs. You shouldn't set this below
+  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
+  flush_interval = "10s"
+  ## Jitter the flush interval by a random amount. This is primarily to avoid
+  ## large write spikes for users running a large number of telegraf instances.
+  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
+  flush_jitter = "0s"
+
+  ## Logging configuration:
+  ## Run telegraf in debug mode
+  debug = false
+  ## Run telegraf in quiet mode
+  quiet = false
+  ## Specify the log file name. The empty string means to log to stdout.
+  logfile = "/var/log/telegraf/telegraf.log"
+
+  ## Override default hostname, if empty use os.Hostname()
+  hostname = ""
+
+
+###############################################################################
+#                                  OUTPUTS                                    #
+###############################################################################
+
+# Configuration for influxdb server to send metrics to
+[[outputs.influxdb]]
+  # The full HTTP or UDP endpoint URL for your InfluxDB instance.
+  # Multiple urls can be specified but it is assumed that they are part of the same
+  # cluster, this means that only ONE of the urls will be written to each interval.
+  # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
+  urls = ["{{INFLUXDB_URL}}"] # required
+  # The target database for metrics (telegraf will create it if not exists)
+  database = "{{DATABASE_NAME}}" # required
+  # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+  # note: using second precision greatly helps InfluxDB compression
+  precision = "s"
+
+  ## Write timeout (for the InfluxDB client), formatted as a string.
+  ## If not provided, will default to 5s. 0s means no timeout (not recommended).
+  timeout = "5s"
+  # username = "telegraf"
+  # password = "metricsmetricsmetricsmetrics"
+  # Set the user agent for HTTP POSTs (can be useful for log differentiation)
+  # user_agent = "telegraf"
+  # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
+  # udp_payload = 512
+
+
+###############################################################################
+#                                  INPUTS                                     #
+###############################################################################
+# # Influx HTTP write listener
+[[inputs.http_listener]]
+  ## Address and port to host HTTP listener on
+  service_address = ":8186"
+
+  ## timeouts
+  read_timeout = "10s"
+  write_timeout = "10s"
+
+  ## HTTPS
+  #tls_cert= "/etc/telegraf/cert.pem"
+  #tls_key = "/etc/telegraf/key.pem"
+
+  ## MTLS
+  #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
\ No newline at end of file
diff --git a/test/streaming-sim/LineProtocolGenerator.py b/test/streaming-sim/LineProtocolGenerator.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d4b07736b3fa3b318754d411aaeb1d91aa2f537
--- /dev/null
+++ b/test/streaming-sim/LineProtocolGenerator.py
@@ -0,0 +1,307 @@
+# line protocol
+
+# Method to create a full InfluxDB request statement (based on partial statement from client)
+import uuid
+from random import random, randint
+
+
+# Reports TX and RX, scaling on requested quality
+def generate_network_report(recieved_bytes, sent_bytes, time):
+    # Measurement
+    result = 'net_port_io'
+    # Tags
+    result += ',port_id=enps03 '
+    # Fields
+    result += 'RX_BYTES_PORT_M=' + str(recieved_bytes) + ","
+    result += 'TX_BYTES_PORT_M=' + str(sent_bytes)
+    # Timestamp
+    result += ' ' + str(_getNSTime(time))
+
+    # Measurement
+    #print(result)
+    return result
+
+
+# Formats VM config
+def generate_vm_config(state, cpu, mem, storage, time):
+    # metric
+    result = 'vm_res_alloc'
+    # Tags
+    result += ',vm_state=' + quote_wrap(state)
+    result += ' '
+    # Fields
+    result += 'cpu=' + str(cpu)
+    result += ',memory=' + quote_wrap(mem)
+    result += ',storage=' + quote_wrap(storage)
+
+    # Time
+    result += ' ' + str(_getNSTime(time))
+
+    print(result)
+    return result
+
+
+# Reports cpu usage, scaling on requests
+def generate_cpu_report(cpu_usage, cpu_active_time, cpu_idle_time, time):
+    result = 'cpu_usage'
+    # Tag
+    result += ' '
+    # field
+    result += 'cpu_usage='+str(cpu_usage)
+    result += ',cpu_active_time='+str(cpu_active_time)
+    result += ',cpu_idle_time='+str(cpu_idle_time)
+    result += ' '
+    # Time
+    result += str(_getNSTime(time))
+    print(result)
+    return result
+
+
+# Reports response times, scaling on number of requests
+def generate_mpegdash_report(resource, requests, avg_response_time, peak_response_time, time):
+    # Measurement
+    result = 'mpegdash_service'
+    # Tags
+    result += ',cont_nav=\"' + str(resource) + "\" "
+    # Fields
+
+    # result += 'cont_rep=' + str(quality) + ','
+    result += 'requests=' + str(requests) + ','
+    result += 'avg_response_time=' + str(avg_response_time) + ','
+    result += 'peak_response_time=' + str(peak_response_time)
+    # Timestamp
+    result += ' ' + str(_getNSTime(time))
+    print(result)
+    return result
+
+#ipendpoint_route,ipendpoint_id,cont_nav=FQDN HTTP_REQUESTS_FQDN_M, NETWORK_FQDN_LATENCY timestamp
+def generate_ipendpoint_route(resource, requests, latency, time):
+    # Measurement
+    result = 'ipendpoint_route'
+    # Tags
+    result += ',cont_nav=\"' + str(resource) + "\" "
+    # Fields
+
+    # result += 'cont_rep=' + str(quality) + ','
+    result += 'http_requests_fqdn_m=' + str(requests) + ','
+    result += 'network_fqdn_latency=' + str(latency)
+    # Timestamp
+    result += ' ' + str(_getNSTime(time))
+    #print(result)
+    return result
+
+# Influx needs strings to be quoted, this provides a utility interface to do this
+def quote_wrap(str):
+    return "\"" + str + "\""
+
+
+# InfluxDB likes to have time-stamps in nanoseconds
+def _getNSTime(time):
+    # Convert to nano-seconds
+    timestamp = int(1000000000*time)
+    #print("timestamp", timestamp)
+    return timestamp
+
+# DEPRECATED
+# ____________________________________________________________________________
+
+# DEPRECATED: old structure, not part of new spec
+def _generateClientRequest(cReq, id, time):
+    # Tags first
+    result = 'sid="' + str(id) + '",' + cReq
+
+    # Fields
+    # No additional fields here yet
+
+    # Timestamp
+    result += ' ' + str(_getNSTime(time))
+
+    # Measurement
+    return 'request,' + result
+
+
+# Method to create a full InfluxDB response statement
+# DEPRECATED: old structure, not part of new spec
+def _generateServerResponse(reqID, quality, time, cpuUsage, qualityDifference):
+    # Tags first
+    result = ' '
+
+    # Fields
+    result += 'quality=' + str(quality) + ','
+    result += 'cpuUsage=' + str(cpuUsage) + ','
+    result += 'qualityDifference=' + str(qualityDifference) + ','
+    result += 'requestID="' + str(reqID) + '",'
+    result += 'index="' + str(uuid.uuid4()) + '"'
+
+    # Timestamp
+    result += ' ' + str(_getNSTime(time))
+
+    # Measurement
+    # print('response'+result)
+    return 'response' + result
+
+
+
+# Formats server config
+def _generateServerConfig(ID, location, cpu, mem, storage, time):
+    # metric
+    result = 'host_resource'
+    # Tags
+    result += ',slice_id=' + quote_wrap(ID)
+    result += ',location=' + quote_wrap(location)
+    result += ' '
+    # Fields
+    result += 'cpu=' + str(cpu)
+    result += ',memory=' + quote_wrap(mem)
+    result += ',storage=' + quote_wrap(storage)
+
+    # Time
+    result += ' ' + str(_getNSTime(time))
+
+    print(result)
+    return result
+
+
+
+# Format port config
+def _configure_port(port_id, state, rate, time):
+    # metric
+    result = 'net_port_config '
+    # Fields
+    result += 'port_id=' + quote_wrap('enps' + port_id)
+    result += ',port_state=' + quote_wrap(state)
+    result += ',tx_constraint=' + quote_wrap(rate)
+    result += ' '
+
+    # Time
+    result += ' ' + str(_getNSTime(time))
+
+    print(result)
+    return result
+
+
+# Format service function config
+def _configure_service_function(state, max_connected_clients):
+    # measurement
+    result = 'mpegdash_service_config'
+    # tags
+    result += ',service_state='+quote_wrap(state)
+    result += ' '
+    # fields
+    result += 'max_connected_clients='+str(max_connected_clients)
+
+    return result
+
+
+
+# Reports memory usage, scaling on requests
+def generate_mem_report(requests, total_mem, time):
+    # Measurement
+    result = 'mem'
+    result += ' '
+    # field
+    used = randint(0, min(100,5*requests))
+    available = 100-used
+    result += 'available_percent='+str(available)
+    result += ',used_percent='+str(used)
+    result += ',total='+str(total_mem)
+    result += ' '
+    # Time
+    result += str(_getNSTime(time))
+    print(result)
+    return result
+
+
+# Formats compute node config
+def generate_compute_node_config(slice_id, location, node_id, cpus, mem, storage, time):
+    # Measurement
+    result = 'compute_node_config'
+    # CommonContext Tag
+    result += ',slice_id='+quote_wrap(slice_id)
+    # Tag
+    result += ',location='+quote_wrap(location)
+    result += ',comp_node_id='+quote_wrap(node_id)
+    result += ' '
+    # field
+    result += 'cpus='+str(cpus)
+    result += ',memory='+str(mem)
+    result += ',storage='+str(storage)
+    result += ' '
+    # Time
+    result += str(_getNSTime(time))
+    print(result)
+    return result
+
+
+# Formats network resource config
+def generate_network_resource_config(slice_id, network_id, bandwidth, time):
+    # Measurement
+    result = 'network_resource_config'
+    # Meta Tag
+    result += ',slice_id='+quote_wrap(slice_id)
+    # Tag
+    result += ',network_id='+quote_wrap(network_id)
+    result += ' '
+    # field
+    result += 'bandwidth='+str(bandwidth)
+    result += ' '
+    # Time
+    result += str(_getNSTime(time))
+    print(result)
+    return result
+
+
+# Formats network interface config
+def generate_network_interface_config(slice_id, comp_node_id, port_id, rx_constraint, tx_constraint, time):
+    # Measurement
+    result = 'network_interface_config'
+    # Meta Tag
+    result += ',slice_id='+quote_wrap(slice_id)
+    # Tags
+    result += ',comp_node_id='+quote_wrap(comp_node_id)
+    result += ',port_id='+quote_wrap(port_id)
+    result += ' '
+    # field
+    result += 'rx_constraint='+str(rx_constraint)
+    result += ',tx_constraint='+str(tx_constraint)
+    result += ' '
+    # Time
+    result += str(_getNSTime(time))
+    print(result)
+    return result
+
+
+# Format SF instance config
+def generate_sf_instance_surrogate_config(loc, sfc, sfc_i, sf_package, sf_i, cpus, mem, storage, time):
+    # Measurement
+    result = 'sf_instance_surrogate_config'
+    # Meta Tag
+    result += ',location='+quote_wrap(loc)
+    result += ',sfc='+quote_wrap(sfc)
+    result += ',sfc_i='+quote_wrap(sfc_i)
+    result += ',sf_package='+quote_wrap(sf_package)
+    result += ',sf_i='+quote_wrap(sf_i)
+    result += ' '
+    # field
+    result += 'cpus='+str(cpus)
+    result += ',memory='+str(mem)
+    result += ',storage='+str(storage)
+    result += ' '
+    # Time
+    result += str(_getNSTime(time))
+    print(result)
+    return result
+
+
+# Formats context container as part of other line protocol generators
+def service_function_measurement(measurement, service_function_context):
+    result = measurement
+    result += ',sfc='+quote_wrap(service_function_context.sfc)
+    result += ',sfc_i='+quote_wrap(service_function_context.sfc_i)
+    result += ',sf_package='+quote_wrap(service_function_context.sf_package)
+    result += ',sf_i='+quote_wrap(service_function_context.sf_i)
+
+    return result
+
+
+
diff --git a/test/streaming-sim/StreamingSim.py b/test/streaming-sim/StreamingSim.py
new file mode 100644
index 0000000000000000000000000000000000000000..0182e75dc99b9e9f28ffad87a0d4d40e5929d67b
--- /dev/null
+++ b/test/streaming-sim/StreamingSim.py
@@ -0,0 +1,203 @@
+import LineProtocolGenerator as lp
+import time
+import urllib.parse
+import urllib.request
+import sys
+import random
+
+# Simulation parameters
+TICK_TIME = 1  # seconds of simulated time per loop iteration
+DEFAULT_REQUEST_RATE_INC = 1  # requests/sec added at each ramp-up step
+DEFAULT_REQUEST_RATE_INC_PERIOD = 10  # ticks between arrival-rate increments
+SIMULATION_TIME_SEC = 60*60  # total simulated duration: one hour
+
+# CLMC parameters
+INFLUX_DB_URL = 'http://192.168.50.10:8086'  # InfluxDB HTTP API endpoint
+AGENT_URL1 = 'http://192.168.50.11:8186'  # metrics agent for endpoint 1 (DC1)
+AGENT_URL2 = 'http://192.168.50.12:8186'  # metrics agent for endpoint 2 (DC2)
+
+# Simulator for services
+class sim:
+    def __init__(self, influx_url):
+        # We don't need this as the db is CLMC metrics
+        self.influx_db = 'CLMCMetrics'
+        self.influx_url = influx_url
+        # Teardown DB from previous sim and bring it back up
+        self._deleteDB()
+        self._createDB()
+
+
+    def run(self, simulation_length_seconds):
+        start_time = time.time()-SIMULATION_TIME_SEC
+        sim_time = start_time
+
+        # segment_size : the length of video requested at a time
+        # bit_rate: MPEG-2 High 1080p 25fps = 80Mbps
+        ip_endpoints = [{'agent_url': AGENT_URL1, 'location': 'DC1', 'cpu': 16,
+                        'mem': '8GB', 'storage': '1TB', 'request_queue': 0, 'request_arrival_rate': 0,
+                        'segment_size': 2, 'video_bit_rate': 80, 'packet_size': 1500},
+                        {'agent_url': AGENT_URL2, 'location': 'DC2', 'cpu': 4, 
+                        'mem': '8GB', 'storage': '1TB', 'request_queue': 0, 'request_arrival_rate': 0, 
+                        'segment_size': 2, 'video_bit_rate': 80, 'packet_size': 1500}
+                        ]
+
+        # Simulate configuration of the ipendpoints
+        # endpoint state->mu, sigma, secs normal distribution
+        config_delay_dist = {"placing": [10, 0.68], "booting": [10, 0.68],"connecting": [10, 0.68]}
+
+        # Place endpoints
+        max_delay = 0              
+        for ip_endpoint in ip_endpoints:
+            delay_time = self._changeVMState(sim_time, ip_endpoint, config_delay_dist['placing'][0], config_delay_dist['placing'][0]*config_delay_dist['placing'][1], 'placing', 'placed')
+            if delay_time > max_delay:
+                max_delay = delay_time
+        sim_time +=max_delay
+
+        # Boot endpoints
+        max_delay = 0        
+        for ip_endpoint in ip_endpoints:
+            delay_time = self._changeVMState(sim_time, ip_endpoint, config_delay_dist['booting'][0], config_delay_dist['booting'][0]*config_delay_dist['booting'][1], 'booting', 'booted')
+            if delay_time > max_delay:
+                max_delay = delay_time            
+        sim_time +=max_delay
+
+        # Connect endpoints
+        max_delay = 0     
+        for ip_endpoint in ip_endpoints:
+            delay_time = self._changeVMState(sim_time, ip_endpoint, config_delay_dist['connecting'][0], config_delay_dist['connecting'][0]*config_delay_dist['connecting'][1], 'connecting', 'connected')
+            if delay_time > max_delay:
+                max_delay = delay_time
+        sim_time +=max_delay
+   
+        request_arrival_rate_inc = DEFAULT_REQUEST_RATE_INC
+        request_queue = 0
+        inc_period_count = 0
+        for i in range(simulation_length_seconds):        
+            for ip_endpoint in ip_endpoints:
+                request_processing_time = 0
+                cpu_time_available = 0
+                requests_processed = 0
+                max_requests_processed = 0
+                cpu_active_time = 0
+                cpu_idle_time = 0
+                cpu_usage = 0
+                cpu_load_time = 0
+                avg_response_time = 0
+                peak_response_time = 0
+
+                # linear inc to arrival rate
+                if inc_period_count >= DEFAULT_REQUEST_RATE_INC_PERIOD:
+                    ip_endpoint['request_arrival_rate'] += request_arrival_rate_inc
+                    inc_period_count = 0
+                else:
+                    inc_period_count += 1
+                # add new requests to the queue
+                ip_endpoint['request_queue'] += ip_endpoint['request_arrival_rate']
+
+                # time to process one second of video (mS) in the current second
+                request_processing_time = int(random.normalvariate(10, 10*0.68))
+                if request_processing_time <= 10:
+                    request_processing_time = 10
+                # time depends on the length of the segments in seconds
+                request_processing_time *= ip_endpoint['segment_size']
+
+                # amount of cpu time (mS) per tick
+                cpu_time_available = ip_endpoint['cpu']*TICK_TIME*1000
+                max_requests_processed = int(cpu_time_available/request_processing_time)
+                # calc how many requests processed
+                if ip_endpoint['request_queue'] <= max_requests_processed:
+                    # processed all of the requests
+                    requests_processed = ip_endpoint['request_queue']
+                else:
+                    # processed the maxmum number of requests
+                    requests_processed = max_requests_processed
+
+                # calculate cpu usage
+                cpu_active_time = int(requests_processed*request_processing_time)
+                cpu_idle_time = int(cpu_time_available-cpu_active_time)
+                cpu_usage = cpu_active_time/cpu_time_available
+                self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_cpu_report(cpu_usage, cpu_active_time, cpu_idle_time, sim_time))
+
+                # calc network usage metrics
+                bytes_rx = 2048*requests_processed           
+                bytes_tx = int(ip_endpoint['video_bit_rate']/8*1000000*requests_processed*ip_endpoint['segment_size'])
+                self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_network_report(bytes_rx, bytes_tx, sim_time))                
+
+                # time to process all of the requests in the queue
+                peak_response_time = ip_endpoint['request_queue']*request_processing_time/ip_endpoint['cpu']
+                # mid-range 
+                avg_response_time = (peak_response_time+request_processing_time)/2
+                self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_mpegdash_report('http://localhost/server-status?auto', ip_endpoint['request_arrival_rate'], avg_response_time, peak_response_time, sim_time))
+
+                # need to calculate this but sent at 5mS for now
+                network_request_delay = 0.005
+
+                # calculate network response delays (2km link, 100Mbps)
+                network_response_delay = self._calcNetworkDelay(2000, 100, ip_endpoint['packet_size'], ip_endpoint['video_bit_rate'], ip_endpoint['segment_size'])
+
+                e2e_delay = network_request_delay + (avg_response_time/1000) + network_response_delay
+
+                self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_ipendpoint_route('http://localhost/server-status?auto', ip_endpoint['request_arrival_rate'], e2e_delay, sim_time))
+
+                # remove requests processed off the queue
+                ip_endpoint['request_queue'] -= int(requests_processed)            
+
+            sim_time += TICK_TIME
+        end_time = sim_time
+        print("Simulation Finished. Start time {0}. End time {1}. Total time {2}".format(start_time,end_time,end_time-start_time))
+
+    # distance metres
+    # bandwidth Mbps
+    # package size bytes
+    # tx_video_bit_rate bp/sec
+    # segment size sec
+    def _calcNetworkDelay(self, distance, bandwidth, packet_size, tx_video_bit_rate, segment_size):
+        response_delay = 0
+
+        # propogation delay = distance/speed () (e.g 2000 metres * 2*10^8 for optical fibre)
+        propogation_delay = distance/(2*100000000)
+        # packetisation delay = ip packet size (bits)/tx rate (e.g. 100Mbp with  0% packet loss)
+        packetisation_delay = (packet_size*8)/(bandwidth*1000000)
+    #    print('packetisation_delay:', packetisation_delay)   
+        # total number of packets to be sent
+        packets = (tx_video_bit_rate*1000000)/(packet_size*8)
+     #   print('packets:', packets)        
+        response_delay = packets*(propogation_delay+packetisation_delay)
+      #  print('response_delay:', response_delay)
+
+        return response_delay     
+
+    def _changeVMState(self, sim_time, ip_endpoint, mu, sigma, transition_state, next_state):
+        delay_time = 0
+    
+        self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_vm_config(transition_state, ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], sim_time))   
+        
+        delay_time = random.normalvariate(mu, sigma)        
+        
+        self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_vm_config(next_state, ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], sim_time+delay_time))
+
+        return delay_time
+
+    def _createDB(self):
+        self._sendInfluxQuery(self.influx_url, 'CREATE DATABASE ' + self.influx_db)
+
+
+    def _deleteDB(self):
+        self._sendInfluxQuery(self.influx_url, 'DROP DATABASE ' + self.influx_db)
+
+
+    def _sendInfluxQuery(self, url, query):
+        query = urllib.parse.urlencode({'q': query})
+        query = query.encode('ascii')
+        req = urllib.request.Request(url + '/query ', query)
+        urllib.request.urlopen(req)
+
+    def _sendInfluxData(self, url, data):
+        data = data.encode()
+        header = {'Content-Type': 'application/octet-stream'}
+        req = urllib.request.Request(url + '/write?db=' + self.influx_db, data, header)
+        urllib.request.urlopen(req)  
+
+# Entry point: rebuild the metrics database and run a full simulated hour
+simulator = sim(INFLUX_DB_URL)
+simulator.run(SIMULATION_TIME_SEC)
+
diff --git a/test/streaming-sim/VerifySimResults.py b/test/streaming-sim/VerifySimResults.py
new file mode 100644
index 0000000000000000000000000000000000000000..2060a23d578d2b8cb278f9678a98b8e8430c92d5
--- /dev/null
+++ b/test/streaming-sim/VerifySimResults.py
@@ -0,0 +1,66 @@
+import sys
+import urllib.parse
+import urllib.request
+
+# Count queries, keyed by measurement name, used to verify the simulator
+# wrote the expected number of points to each measurement.
+queryReference = {
+    "cpu_usage" : "SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"cpu_usage\"",
+    "ipendpoint_route" : "SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"ipendpoint_route\"",
+    "mpegdash_service" : "SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"mpegdash_service\"",
+    "net_port_io" : "SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"net_port_io\"",
+    "vm_res_alloc" : "SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"vm_res_alloc\""
+}
+
+# Expected raw JSON responses for each query above, compared byte-for-byte.
+# NOTE(review): counts of 7200 presumably correspond to the simulator's
+# 3600-second run over two endpoints, and 12 to the VM state transitions --
+# verify against StreamingSim.py if the simulation parameters change.
+resultReference = { 
+    "cpu_usage" : "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu_usage\",\"columns\":[\"time\",\"count_cpu_active_time\",\"count_cpu_idle_time\",\"count_cpu_usage\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200,7200]]}]}]}",
+    "ipendpoint_route" : "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"ipendpoint_route\",\"columns\":[\"time\",\"count_http_requests_fqdn_m\",\"count_network_fqdn_latency\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200]]}]}]}",
+    "mpegdash_service" : "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"mpegdash_service\",\"columns\":[\"time\",\"count_avg_response_time\",\"count_peak_response_time\",\"count_requests\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200,7200]]}]}]}",
+    "net_port_io" : "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"net_port_io\",\"columns\":[\"time\",\"count_RX_BYTES_PORT_M\",\"count_TX_BYTES_PORT_M\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200]]}]}]}",
+    "vm_res_alloc" : "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"vm_res_alloc\",\"columns\":[\"time\",\"count_cpu\",\"count_memory\",\"count_storage\"],\"values\":[[\"1970-01-01T00:00:00Z\",12,12,12]]}]}]}"
+}
+
+def checkResult( query, queryResult ):
+    result = False
+
+    if query != None and queryResult != None:
+        if ( query in resultReference ):
+            if ( resultReference[query] == queryResult ):            
+                print ( "Result correct" )
+                result = True
+            else:
+                print ( "Incorrect result for query: " + query )
+                print ( "Expected = " + resultReference[query] )
+                print ( "Result   = " + queryResult )
+        else:
+            print( "Could not find query result for: " + query )
+    else:
+        print( "Could not check result: invalid parameters" )
+
+    return result
+
+def sendInfluxQuery( url, query ):
+    query = urllib.parse.urlencode( {'q': query} )
+    query = query.encode( 'ascii' )
+    req = urllib.request.Request( url + '/query ', query )
+    result = urllib.request.urlopen( req )
+
+    return result.read().decode("utf-8").strip()
+
+# Entry point
+# ---------------------------------------------------------------------------------------
+testFailed = False
+
+for key in list( queryReference ):
+    query = queryReference[key]
+    result = sendInfluxQuery( "http://localhost:8086", query )
+
+    if checkResult( key, result ) == False:
+        testFailed = True
+        break
+
+if testFailed :
+    print( "Failed simulation result test" )
+    sys.exit( 1 )
+else:
+    print( "Test succeeded" )
+
+sys.exit( 0 )
\ No newline at end of file
diff --git a/test/streaming/dashboard.json b/test/streaming/dashboard.json
new file mode 100644
index 0000000000000000000000000000000000000000..f33adbc57b53ef08d15391b372b4b7aa3e63845c
--- /dev/null
+++ b/test/streaming/dashboard.json
@@ -0,0 +1 @@
+{"id":1,"cells":[{"i":"657dc6a9-1359-4f28-87c1-8174cab033e2","x":4,"y":2,"w":4,"h":3,"name":"Apache Endpoint 2 Request rate \u0026 Response time","queries":[{"query":"SELECT mean(\"ReqPerSec\") AS \"mean_ReqPerSec\" FROM \"CLMCMetrics\".\"autogen\".\"apache\" WHERE time \u003e now() - 1h AND \"ipendpoint\"='adaptive_streaming_I1_apache2' GROUP BY time(10s)","label":"apache.ReqPerSec","queryConfig":{"database":"CLMCMetrics","measurement":"apache","retentionPolicy":"autogen","fields":[{"field":"ReqPerSec","funcs":["mean"]}],"tags":{"ipendpoint":["adaptive_streaming_I1_apache2"]},"groupBy":{"time":"10s","tags":[]},"areTagsAccepted":true,"rawText":null,"range":{"upper":"","lower":"now() - 1h"}}},{"query":"SELECT mean(\"response_time\") AS \"mean_response_time\" FROM \"CLMCMetrics\".\"autogen\".\"net_response\" WHERE time \u003e now() - 1h AND \"ipendpoint\"='adaptive_streaming_I1_apache2' GROUP BY time(10s)","label":"net_response.response_time","queryConfig":{"database":"CLMCMetrics","measurement":"net_response","retentionPolicy":"autogen","fields":[{"field":"response_time","funcs":["mean"]}],"tags":{"ipendpoint":["adaptive_streaming_I1_apache2"]},"groupBy":{"time":"10s","tags":[]},"areTagsAccepted":true,"rawText":null,"range":{"upper":"","lower":"now() - 1h"}}}],"type":"line","links":{"self":"/chronograf/v1/dashboards/5/cells/657dc6a9-1359-4f28-87c1-8174cab033e2"}},{"i":"998dac9c-fdfb-43cb-905c-650082acf0fe","x":0,"y":2,"w":4,"h":3,"name":"Apache Endpoint 1 Request rate \u0026 Response time","queries":[{"query":"SELECT mean(\"ReqPerSec\") AS \"mean_ReqPerSec\" FROM \"CLMCMetrics\".\"autogen\".\"apache\" WHERE time \u003e now() - 1h AND \"ipendpoint\"='adaptive_streaming_I1_apache1' GROUP BY 
time(10s)","label":"apache.ReqPerSec","queryConfig":{"database":"CLMCMetrics","measurement":"apache","retentionPolicy":"autogen","fields":[{"field":"ReqPerSec","funcs":["mean"]}],"tags":{"ipendpoint":["adaptive_streaming_I1_apache1"]},"groupBy":{"time":"10s","tags":[]},"areTagsAccepted":true,"rawText":null,"range":{"upper":"","lower":"now() - 1h"}}},{"query":"SELECT mean(\"response_time\") AS \"mean_response_time\" FROM \"CLMCMetrics\".\"autogen\".\"net_response\" WHERE time \u003e now() - 1h AND \"ipendpoint\"='adaptive_streaming_I1_apache1' GROUP BY time(10s)","label":"net_response.response_time","queryConfig":{"database":"CLMCMetrics","measurement":"net_response","retentionPolicy":"autogen","fields":[{"field":"response_time","funcs":["mean"]}],"tags":{"ipendpoint":["adaptive_streaming_I1_apache1"]},"groupBy":{"time":"10s","tags":[]},"areTagsAccepted":true,"rawText":null,"range":{"upper":"","lower":"now() - 1h"}}}],"type":"line","links":{"self":"/chronograf/v1/dashboards/5/cells/998dac9c-fdfb-43cb-905c-650082acf0fe"}},{"i":"31c0e989-99a8-4340-bbd9-6e5a83826324","x":4,"y":5,"w":4,"h":4,"name":"Apache Endpoint 2 CPU Load \u0026 connection count","queries":[{"query":"SELECT mean(\"CPULoad\") AS \"mean_CPULoad\" FROM \"CLMCMetrics\".\"autogen\".\"apache\" WHERE time \u003e now() - 1h AND \"ipendpoint\"='adaptive_streaming_I1_apache2' GROUP BY time(10s)","label":"apache.CPULoad","queryConfig":{"database":"CLMCMetrics","measurement":"apache","retentionPolicy":"autogen","fields":[{"field":"CPULoad","funcs":["mean"]}],"tags":{"ipendpoint":["adaptive_streaming_I1_apache2"]},"groupBy":{"time":"10s","tags":[]},"areTagsAccepted":true,"rawText":null,"range":{"upper":"","lower":"now() - 1h"}}},{"query":"SELECT mean(\"ConnsTotal\") AS \"mean_ConnsTotal\" FROM \"CLMCMetrics\".\"autogen\".\"apache\" WHERE time \u003e now() - 1h AND \"host\"='apache2' GROUP BY 
time(10s)","label":"apache.ConnsTotal","queryConfig":{"database":"CLMCMetrics","measurement":"apache","retentionPolicy":"autogen","fields":[{"field":"ConnsTotal","funcs":["mean"]}],"tags":{"host":["apache2"]},"groupBy":{"time":"10s","tags":[]},"areTagsAccepted":true,"rawText":null,"range":{"upper":"","lower":"now() - 1h"}}}],"type":"line","links":{"self":"/chronograf/v1/dashboards/5/cells/31c0e989-99a8-4340-bbd9-6e5a83826324"}},{"i":"5836bf44-5af2-4dcd-8882-a4b658c58c9e","x":0,"y":5,"w":4,"h":4,"name":"Apache Endpoint 1 CPU Load \u0026 connection count","queries":[{"query":"SELECT mean(\"CPULoad\") AS \"mean_CPULoad\" FROM \"CLMCMetrics\".\"autogen\".\"apache\" WHERE time \u003e now() - 1h AND \"ipendpoint\"='adaptive_streaming_I1_apache1' GROUP BY time(10s)","label":"apache.CPULoad","queryConfig":{"database":"CLMCMetrics","measurement":"apache","retentionPolicy":"autogen","fields":[{"field":"CPULoad","funcs":["mean"]}],"tags":{"ipendpoint":["adaptive_streaming_I1_apache1"]},"groupBy":{"time":"10s","tags":[]},"areTagsAccepted":true,"rawText":null,"range":{"upper":"","lower":"now() - 1h"}}},{"query":"SELECT mean(\"ConnsTotal\") AS \"mean_ConnsTotal\" FROM \"CLMCMetrics\".\"autogen\".\"apache\" WHERE time \u003e now() - 1h AND \"ipendpoint\"='adaptive_streaming_I1_apache1' GROUP BY time(10s)","label":"apache.ConnsTotal","queryConfig":{"database":"CLMCMetrics","measurement":"apache","retentionPolicy":"autogen","fields":[{"field":"ConnsTotal","funcs":["mean"]}],"tags":{"ipendpoint":["adaptive_streaming_I1_apache1"]},"groupBy":{"time":"10s","tags":[]},"areTagsAccepted":true,"rawText":null,"range":{"upper":"","lower":"now() - 1h"}}}],"type":"line","links":{"self":"/chronograf/v1/dashboards/5/cells/5836bf44-5af2-4dcd-8882-a4b658c58c9e"}},{"i":"d3730e16-9c22-43d8-9f37-a98f76b04133","x":0,"y":0,"w":8,"h":2,"name":"Total Apache connections","queries":[{"query":"SELECT mean(\"ConnsTotal\") AS \"mean_ConnsTotal\" FROM \"CLMCMetrics\".\"autogen\".\"apache\" WHERE time \u003e 
now() - 1h GROUP BY time(10s)","label":"apache.ConnsTotal","queryConfig":{"database":"CLMCMetrics","measurement":"apache","retentionPolicy":"autogen","fields":[{"field":"ConnsTotal","funcs":["mean"]}],"tags":{},"groupBy":{"time":"10s","tags":[]},"areTagsAccepted":false,"rawText":null,"range":{"upper":"","lower":"now() - 1h"}}}],"type":"line","links":{"self":"/chronograf/v1/dashboards/5/cells/d3730e16-9c22-43d8-9f37-a98f76b04133"}}],"templates":[],"name":"Adaptive Streaming Experiment Dashboard","links":{"self":"/chronograf/v1/dashboards/5","cells":"/chronograf/v1/dashboards/5/cells","templates":"/chronograf/v1/dashboards/5/templates"}}
\ No newline at end of file
diff --git a/test/streaming/influx.json b/test/streaming/influx.json
new file mode 100644
index 0000000000000000000000000000000000000000..34bb14a56b2cad467d93d5b8451804a85ebbc707
--- /dev/null
+++ b/test/streaming/influx.json
@@ -0,0 +1,7 @@
+{
+    "id": "1",
+    "name": "Influx 1",
+    "url": "http://localhost:8086",
+    "default": true,
+    "telegraf": "CLMCMetrics"
+}
\ No newline at end of file
diff --git a/test/streaming/kapacitor.conf b/test/streaming/kapacitor.conf
new file mode 100644
index 0000000000000000000000000000000000000000..e8332d6297a5ba109fff0a191ff5a50d9ade15fd
--- /dev/null
+++ b/test/streaming/kapacitor.conf
@@ -0,0 +1,699 @@
+# The hostname of this node.
+# Must be resolvable by any configured InfluxDB hosts.
+hostname = "localhost"
+# Directory for storing a small amount of metadata about the server.
+data_dir = "/var/lib/kapacitor"
+
+# Do not apply configuration overrides during startup.
+# Useful if the configuration overrides cause Kapacitor to fail startup.
+# This option is intended as a safe guard and should not be needed in practice.
+skip-config-overrides = false
+
+# Default retention-policy, if a write is made to Kapacitor and
+# it does not have a retention policy associated with it,
+# then the retention policy will be set to this value
+default-retention-policy = ""
+
+[http]
+  # HTTP API Server for Kapacitor
+  # This server is always on,
+  # it serves both as a write endpoint
+  # and as the API endpoint for all other
+  # Kapacitor calls.
+  bind-address = ":9092"
+  log-enabled = true
+  write-tracing = false
+  pprof-enabled = false
+  https-enabled = false
+  https-certificate = "/etc/ssl/kapacitor.pem"
+
+[config-override]
+  # Enable/Disable the service for overriding configuration via the HTTP API.
+  enabled = true
+
+[logging]
+    # Destination for logs
+    # Can be a path to a file or 'STDOUT', 'STDERR'.
+    file = "/var/log/kapacitor/kapacitor.log"
+    # Logging level can be one of:
+    # DEBUG, INFO, ERROR
+    # HTTP logging can be disabled in the [http] config section.
+    level = "INFO"
+
+[load]
+  # Enable/Disable the service for loading tasks/templates/handlers
+  # from a directory
+  enabled = true
+  # Directory where task/template/handler files are set
+  dir = "/etc/kapacitor/load"
+
+
+[replay]
+  # Where to store replay files, aka recordings.
+  dir = "/var/lib/kapacitor/replay"
+
+[task]
+  # Where to store the tasks database
+  # DEPRECATED: This option is not needed for new installations.
+  # It is only used to determine the location of the task.db file
+  # for migrating to the new `storage` service.
+  dir = "/var/lib/kapacitor/tasks"
+  # How often to snapshot running task state.
+  snapshot-interval = "60s"
+
+[storage]
+  # Where to store the Kapacitor boltdb database
+  boltdb = "/var/lib/kapacitor/kapacitor.db"
+
+[deadman]
+  # Configure a deadman's switch
+  # Globally configure deadman's switches on all tasks.
+  # NOTE: for this to be of use you must also globally configure at least one alerting method.
+  global = false
+  # Threshold, if globally configured the alert will be triggered if the throughput in points/interval is <= threshold.
+  threshold = 0.0
+  # Interval, if globally configured the frequency at which to check the throughput.
+  interval = "10s"
+  # Id -- the alert Id, NODE_NAME will be replaced with the name of the node being monitored.
+  id = "node 'NODE_NAME' in task '{{ .TaskName }}'"
+  # The message of the alert. INTERVAL will be replaced by the interval.
+  message = "{{ .ID }} is {{ if eq .Level \"OK\" }}alive{{ else }}dead{{ end }}: {{ index .Fields \"collected\" | printf \"%0.3f\" }} points/INTERVAL."
+
+
+# Multiple InfluxDB configurations can be defined.
+# Exactly one must be marked as the default.
+# Each one will be given a name and can be referenced in batch queries and InfluxDBOut nodes.
+[[influxdb]]
+  # Connect to an InfluxDB cluster
+  # Kapacitor can subscribe, query and write to this cluster.
+  # Using InfluxDB is not required and can be disabled.
+  enabled = true
+  default = true
+  name = "localhost"
+  urls = ["http://localhost:8086"]
+  username = ""
+  password = ""
+  timeout = 0
+  # Absolute path to pem encoded CA file.
+  # A CA can be provided without a key/cert pair
+  #   ssl-ca = "/etc/kapacitor/ca.pem"
+  # Absolutes paths to pem encoded key and cert files.
+  #   ssl-cert = "/etc/kapacitor/cert.pem"
+  #   ssl-key = "/etc/kapacitor/key.pem"
+
+  # Do not verify the TLS/SSL certificate.
+  # This is insecure.
+  insecure-skip-verify = false
+
+  # Maximum time to try and connect to InfluxDB during startup
+  startup-timeout = "5m"
+
+  # Turn off all subscriptions
+  disable-subscriptions = false
+
+  # Subscription mode is either "cluster" or "server"
+  subscription-mode = "cluster"
+
+  # Which protocol to use for subscriptions
+  # one of 'udp', 'http', or 'https'.
+  subscription-protocol = "http"
+
+  # Subscriptions resync time interval
+  # Useful if you want to subscribe to new created databases
+  # without restart Kapacitord
+  subscriptions-sync-interval = "1m0s"
+
+  # Override the global hostname option for this InfluxDB cluster.
+  # Useful if the InfluxDB cluster is in a separate network and
+  # needs special config to connect back to this Kapacitor instance.
+  # Defaults to `hostname` if empty.
+  kapacitor-hostname = ""
+
+  # Override the global http port option for this InfluxDB cluster.
+  # Useful if the InfluxDB cluster is in a separate network and
+  # needs special config to connect back to this Kapacitor instance.
+  # Defaults to the port from `[http] bind-address` if 0.
+  http-port = 0
+
+  # Host part of a bind address for UDP listeners.
+  # For example if a UDP listener is using port 1234
+  # and `udp-bind = "hostname_or_ip"`,
+  # then the UDP port will be bound to `hostname_or_ip:1234`
+  # The default empty value will bind to all addresses.
+  udp-bind = ""
+  # Subscriptions use the UDP network protocol.
+  # The following options of for the created UDP listeners for each subscription.
+  # Number of packets to buffer when reading packets off the socket.
+  udp-buffer = 1000
+  # The size in bytes of the OS read buffer for the UDP socket.
+  # A value of 0 indicates use the OS default.
+  udp-read-buffer = 0
+
+  [influxdb.subscriptions]
+    # Set of databases and retention policies to subscribe to.
+    # If empty will subscribe to all, minus the list in
+    # influxdb.excluded-subscriptions
+    #
+    # Format
+    # db_name = <list of retention policies>
+    #
+    # Example:
+    # my_database = [ "default", "longterm" ]
+  [influxdb.excluded-subscriptions]
+    # Set of databases and retention policies to exclude from the subscriptions.
+    # If influxdb.subscriptions is empty it will subscribe to all
+    # except databases listed here.
+    #
+    # Format
+    # db_name = <list of retention policies>
+    #
+    # Example:
+    # my_database = [ "default", "longterm" ]
+
+[kubernetes]
+  # Enable/Disable the kubernetes service.
+  # Needed by the k8sAutoscale TICKscript node.
+  enabled = false
+  # There are several ways to connect to the kubernetes API servers:
+  #
+  # Via the proxy, start the proxy via the `kubectl proxy` command:
+  #   api-servers = ["http://localhost:8001"]
+  #
+  # From within the cluster itself, in which case
+  # kubernetes secrets and DNS services are used
+  # to determine the needed configuration.
+  #   in-cluster = true
+  #
+  # Direct connection, in which case you need to know
+  # the URL of the API servers,  the authentication token and
+  # the path to the ca cert bundle.
+  # These value can be found using the `kubectl config view` command.
+  #   api-servers = ["http://192.168.99.100:8443"]
+  #   token = "..."
+  #   ca-path = "/path/to/kubernetes/ca.crt"
+  #
+  # Kubernetes can also serve as a discoverer for scrape targets.
+  # In that case the type of resources to discoverer must be specified.
+  # Valid values are: "node", "pod", "service", and "endpoint".
+  #   resource = "pod"
+
+
+
+[smtp]
+  # Configure an SMTP email server
+  # Will use TLS and authentication if possible
+  # Only necessary for sending emails from alerts.
+  enabled = false
+  host = "localhost"
+  port = 25
+  username = ""
+  password = ""
+  # From address for outgoing mail
+  from = ""
+  # List of default To addresses.
+  # to = ["oncall@example.com"]
+
+  # Skip TLS certificate verify when connecting to SMTP server
+  no-verify = false
+  # Close idle connections after timeout
+  idle-timeout = "30s"
+
+  # If true the all alerts will be sent via Email
+  # without explicitly marking them in the TICKscript.
+  global = false
+  # Only applies if global is true.
+  # Sets all alerts in state-changes-only mode,
+  # meaning alerts will only be sent if the alert state changes.
+  state-changes-only = false
+
+[snmptrap]
+  # Configure an SNMP trap server
+  enabled = false
+  # The host:port address of the SNMP trap server
+  addr = "localhost:162"
+  # The community to use for traps
+  community = "kapacitor"
+  # Number of retries when sending traps
+  retries = 1
+
+
+[opsgenie]
+    # Configure OpsGenie with your API key and default routing key.
+    enabled = false
+    # Your OpsGenie API Key.
+    api-key = ""
+    # Default OpsGenie teams, can be overridden per alert.
+    # teams = ["team1", "team2"]
+    # Default OpsGenie recipients, can be overridden per alert.
+    # recipients = ["recipient1", "recipient2"]
+    # The OpsGenie API URL should not need to be changed.
+    url = "https://api.opsgenie.com/v1/json/alert"
+    # The OpsGenie Recovery URL, you can change this
+    # based on which behavior you want a recovery to
+    # trigger (Add Notes, Close Alert, etc.)
+    recovery_url = "https://api.opsgenie.com/v1/json/alert/note"
+    # If true then all alerts will be sent to OpsGenie
+    # without explicitly marking them in the TICKscript.
+    # The team and recipients can still be overridden.
+    global = false
+
+[victorops]
+  # Configure VictorOps with your API key and default routing key.
+  enabled = false
+  # Your VictorOps API Key.
+  api-key = ""
+  # Default VictorOps routing key, can be overridden per alert.
+  routing-key = ""
+  # The VictorOps API URL should not need to be changed.
+  url = "https://alert.victorops.com/integrations/generic/20131114/alert"
+  # If true the all alerts will be sent to VictorOps
+  # without explicitly marking them in the TICKscript.
+  # The routing key can still be overridden.
+  global = false
+  # Use JSON for the "data" field
+  # New installations will want to set this to true as it makes
+  # the data that triggered the alert available within VictorOps.
+  # The default is "false" for backwards compatibility reasons.
+  # json-data = false
+
+[pagerduty]
+  # Configure PagerDuty.
+  enabled = false
+  # Your PagerDuty Service Key.
+  service-key = ""
+  # The PagerDuty API URL should not need to be changed.
+  url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
+  # If true the all alerts will be sent to PagerDuty
+  # without explicitly marking them in the TICKscript.
+  global = false
+
+[pushover]
+  # Configure Pushover.
+  enabled = false
+  # Your Pushover API token.
+  token = ""
+  # Your Pushover USER_TOKEN.
+  user-key = ""
+  # The URL for the Pushover API.
+  url = "https://api.pushover.net/1/messages.json"
+
+##########################################
+# Configure Alert POST request Endpoints
+
+# As ENV variables:
+# KAPACITOR_HTTPPOST_0_ENDPOINT = "example"
+# KAPACITOR_HTTPPOST_0_URL = "http://example.com"
+# KAPACITOR_HTTPPOST_0_HEADERS_Example = "header"
+
+# [[httppost]]
+#   endpoint = "example"
+#   url = "http://example.com"
+#   headers = { Example = "your-key" }
+#   basic-auth = { username = "my-user", password = "my-pass" }
+#
+#   # Provide an alert template for constructing a custom HTTP body.
+#   # Alert templates are only used with post alert handlers as they consume alert data.
+#   # The template uses https://golang.org/pkg/text/template/ and has access to the following fields:
+#   #    * .ID - The unique ID for this alert
+#   #    * .Message - The message of the alert
+#   #    * .Details - The details of the alert
+#   #    * .Time - The time the alert event occurred
+#   #    * .Duration - The duration of the alert event.
+#   #    * .Level - The level of the alert, i.e INFO, WARN, or CRITICAL.
+#   #    * .Data - The data that triggered the alert.
+#   #
+#   # Specify the template inline.
+#   alert-template = "{{.Message}}:{{range .Data.Series}}{{.Tags}},{{range .Values}}{{.}}{{end}}{{end}}"
+#   # Specify an absolute path to a template file.
+#   alert-template-file = "/path/to/template/file"
+#
+#   # Provide a row template for constructing a custom HTTP body.
+#   # Row templates are only used with httpPost pipeline nodes as they consume a row at a time.
+#   # The template uses https://golang.org/pkg/text/template/ and has access to the following fields:
+#   #    * .Name - The measurement name of the data stream
+#   #    * .Tags - A map of tags on the data.
+#   #    * .Values - A list of values, each entry is a map containing a "time" key for the time of the point
+#   #       and keys for all other fields on the point.
+#   #
+#   # Specify the template inline.
+#   row-template = "{{.Name}} host={{index .Tags \"host\"}}{{range .Values}} {{index . "time"}} {{index . "value"}}{{end}}"
+#   # Specify an absolute path to a template file.
+#   row-template-file = "/path/to/template/file"
+
+[slack]
+  # Configure Slack.
+  enabled = true
+  # The Slack webhook URL, can be obtained by adding
+  # an Incoming Webhook integration.
+  # Visit https://slack.com/services/new/incoming-webhook
+  # to add new webhook for Kapacitor.
+  url = "https://hooks.slack.com/services/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"
+  # Default channel for messages
+  channel = "#clmc"
+  # If true all the alerts will be sent to Slack
+  # without explicitly marking them in the TICKscript.
+  global = false
+  # Only applies if global is true.
+  # Sets all alerts in state-changes-only mode,
+  # meaning alerts will only be sent if the alert state changes.
+  state-changes-only = false
+
+[telegram]
+  # Configure Telegram.
+  enabled = false
+  # The Telegram Bot URL should not need to be changed.
+  url = "https://api.telegram.org/bot"
+  # Telegram Bot Token, can be obtained From @BotFather.
+  token = ""
+  # Default recipient for messages, Contact @myidbot on Telegram to get an ID.
+  chat-id = ""
+  # Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your alert message.
+  #parse-mode  = "Markdown"
+  # Disable link previews for links in this message
+  disable-web-page-preview = false
+  # Sends the message silently. iOS users will not receive a notification, Android users will receive a notification with no sound.
+  disable-notification = false
+  # If true then all alerts will be sent to Telegram
+  # without explicitly marking them in the TICKscript.
+  global = false
+  # Only applies if global is true.
+  # Sets all alerts in state-changes-only mode,
+  # meaning alerts will only be sent if the alert state changes.
+  state-changes-only = false
+
+[hipchat]
+  # Configure HipChat.
+  enabled = false
+  # The HipChat API URL. Replace subdomain with your
+  # HipChat subdomain.
+  # url = "https://subdomain.hipchat.com/v2/room"
+  # Visit https://www.hipchat.com/docs/apiv2
+  # for information on obtaining your room id and
+  # authentication token.
+  # Default room for messages
+  room = ""
+  # Default authentication token
+  token = ""
+  # If true then all alerts will be sent to HipChat
+  # without explicitly marking them in the TICKscript.
+  global = false
+  # Only applies if global is true.
+  # Sets all alerts in state-changes-only mode,
+  # meaning alerts will only be sent if the alert state changes.
+  state-changes-only = false
+
+[alerta]
+  # Configure Alerta.
+  enabled = false
+  # The Alerta URL.
+  url = ""
+  # Default authentication token.
+  token = ""
+  # Default token prefix
+  # If you are on older versions of alerta you may need to change this to "Key"
+  token-prefix = "Bearer"
+  # Default environment.
+  environment = ""
+  # Default origin.
+  origin = "kapacitor"
+
+[sensu]
+  # Configure Sensu.
+  enabled = false
+  # The Sensu Client host:port address.
+  addr = "sensu-client:3030"
+  # Default JIT source.
+  source = "Kapacitor"
+
+[reporting]
+  # Send usage statistics
+  # every 12 hours to Enterprise.
+  enabled = true
+  url = "https://usage.influxdata.com"
+
+[stats]
+  # Emit internal statistics about Kapacitor.
+  # To consume these stats create a stream task
+  # that selects data from the configured database
+  # and retention policy.
+  #
+  # Example:
+  #  stream|from().database('_kapacitor').retentionPolicy('autogen')...
+  #
+  enabled = true
+  stats-interval = "10s"
+  database = "_kapacitor"
+  retention-policy= "autogen"
+
+[udf]
+# Configuration for UDFs (User Defined Functions)
+[udf.functions]
+    # Example go UDF.
+    # First compile example:
+    #   go build -o avg_udf ./udf/agent/examples/moving_avg.go
+    #
+    # Use in TICKscript like:
+    #   stream.goavg()
+    #           .field('value')
+    #           .size(10)
+    #           .as('m_average')
+    #
+    # uncomment to enable
+    #[udf.functions.goavg]
+    #   prog = "./avg_udf"
+    #   args = []
+    #   timeout = "10s"
+
+    # Example python UDF.
+    # Use in TICKscript like:
+    #   stream.pyavg()
+    #           .field('value')
+    #           .size(10)
+    #           .as('m_average')
+    #
+    # uncomment to enable
+    #[udf.functions.pyavg]
+    #   prog = "/usr/bin/python2"
+    #   args = ["-u", "./udf/agent/examples/moving_avg.py"]
+    #   timeout = "10s"
+    #   [udf.functions.pyavg.env]
+    #       PYTHONPATH = "./udf/agent/py"
+
+    # Example UDF over a socket
+    #[udf.functions.myCustomUDF]
+    #   socket = "/path/to/socket"
+    #   timeout = "10s"
+
+[talk]
+  # Configure Talk.
+  enabled = false
+  # The Talk webhook URL.
+  url = "https://jianliao.com/v2/services/webhook/uuid"
+  # The default authorName.
+  author_name = "Kapacitor"
+
+# MQTT client configuration.
+#  Multiple different clients may be configured by
+#  repeating [[mqtt]] sections.
+[[mqtt]]
+  enabled = false
+  # Unique name for this broker configuration
+  name = "localhost"
+  # Whether this broker configuration is the default
+  default = true
+  # URL of the MQTT broker.
+  # Possible protocols include:
+  #  tcp - Raw TCP network connection
+  #  ssl - TLS protected TCP network connection
+  #  ws  - Websocket network connection
+  url = "tcp://localhost:1883"
+
+  # TLS/SSL configuration
+  # A CA can be provided without a key/cert pair
+  #   ssl-ca = "/etc/kapacitor/ca.pem"
+  # Absolutes paths to pem encoded key and cert files.
+  #   ssl-cert = "/etc/kapacitor/cert.pem"
+  #   ssl-key = "/etc/kapacitor/key.pem"
+
+  # Unique ID for this MQTT client.
+  # If empty, the value of "name" is used
+  client-id = ""
+
+  # Username
+  username = ""
+  # Password
+  password = ""
+
+[[swarm]]
+  # Enable/Disable the Docker Swarm service.
+  # Needed by the swarmAutoscale TICKscript node.
+  enabled = false
+  # Unique ID for this Swarm cluster
+  # NOTE: This is not the ID generated by Swarm rather a user defined
+  # ID for this cluster since Kapacitor can communicate with multiple clusters.
+  id = ""
+  # List of URLs for Docker Swarm servers.
+  servers = ["http://localhost:2376"]
+  # TLS/SSL Configuration for connecting to secured Docker daemons
+  ssl-ca = ""
+  ssl-cert = ""
+  ssl-key = ""
+  insecure-skip-verify = false
+
+##################################
+# Input Methods, same as InfluxDB
+#
+
+[collectd]
+  enabled = false
+  bind-address = ":25826"
+  database = "collectd"
+  retention-policy = ""
+  batch-size = 1000
+  batch-pending = 5
+  batch-timeout = "10s"
+  typesdb = "/usr/share/collectd/types.db"
+
+[opentsdb]
+  enabled = false
+  bind-address = ":4242"
+  database = "opentsdb"
+  retention-policy = ""
+  consistency-level = "one"
+  tls-enabled = false
+  certificate = "/etc/ssl/influxdb.pem"
+  batch-size = 1000
+  batch-pending = 5
+  batch-timeout = "1s"
+
+# Service Discovery and metric scraping
+
+[[scraper]]
+  enabled = false
+  name = "myscraper"
+  # Specify the id of a discoverer service specified below
+  discoverer-id = ""
+  # Specify the type of discoverer service being used.
+  discoverer-service = ""
+  db = "prometheus_raw"
+  rp = "autogen"
+  type = "prometheus"
+  scheme = "http"
+  metrics-path = "/metrics"
+  scrape-interval = "1m0s"
+  scrape-timeout = "10s"
+  username = ""
+  password = ""
+  bearer-token = ""
+  ssl-ca = ""
+  ssl-cert = ""
+  ssl-key = ""
+  ssl-server-name = ""
+  insecure-skip-verify = false
+
+# Supported discovery services
+
+[[azure]]
+  enabled = false
+  id = "myazure"
+  port = 80
+  subscription-id = ""
+  tenant-id = ""
+  client-id = ""
+  client-secret = ""
+  refresh-interval = "5m0s"
+
+[[consul]]
+  enabled = false
+  id = "myconsul"
+  address = "127.0.0.1:8500"
+  token = ""
+  datacenter = ""
+  tag-separator = ","
+  scheme = "http"
+  username = ""
+  password = ""
+  ssl-ca = ""
+  ssl-cert = ""
+  ssl-key = ""
+  ssl-server-name = ""
+  insecure-skip-verify = false
+
+[[dns]]
+  enabled = false
+  id = "mydns"
+  refresh-interval = "30s"
+  ## Type can be SRV, A, or AAAA
+  type = "SRV"
+  ## Port is the port to scrape for records returned by A or AAAA types
+  port = 80
+
+[[ec2]]
+  enabled = false
+  id = "myec2"
+  region = "us-east-1"
+  access-key = ""
+  secret-key = ""
+  profile = ""
+  refresh-interval = "1m0s"
+  port = 80
+
+[[file-discovery]]
+  enabled = false
+  id = "myfile"
+  refresh-interval = "5m0s"
+  files = []
+
+[[gce]]
+  enabled = false
+  id = "mygce"
+  project = ""
+  zone = ""
+  filter = ""
+  refresh-interval = "1m0s"
+  port = 80
+  tag-separator = ","
+
+[[marathon]]
+  enabled = false
+  id = "mymarathon"
+  timeout = "30s"
+  refresh-interval = "30s"
+  bearer-token = ""
+  ssl-ca = ""
+  ssl-cert = ""
+  ssl-key = ""
+  ssl-server-name = ""
+  insecure-skip-verify = false
+
+[[nerve]]
+  enabled = false
+  id = "mynerve"
+  timeout = "10s"
+
+[[serverset]]
+  enabled = false
+  id = "myserverset"
+  timeout = "10s"
+
+[[static-discovery]]
+  enabled = false
+  id = "mystatic"
+  targets = ["localhost:9100"]
+  [static-discovery.labels]
+    region = "us-east-1"
+
+[[triton]]
+  enabled = false
+  id = "mytriton"
+  account = ""
+  dns-suffix = ""
+  endpoint = ""
+  port = 9163
+  refresh-interval = "1m0s"
+  version = 1
+  ssl-ca = ""
+  ssl-cert = ""
+  ssl-key = ""
+  ssl-server-name = ""
+  insecure-skip-verify = false
diff --git a/test/streaming/kapacitor.json b/test/streaming/kapacitor.json
new file mode 100644
index 0000000000000000000000000000000000000000..60118860fc3351e456f1cccfe6145a169f50734f
--- /dev/null
+++ b/test/streaming/kapacitor.json
@@ -0,0 +1,6 @@
+{
+    "id": "1",
+    "name": "CLMCKapacitor",
+    "url": "http://localhost:9092",
+    "active": false
+}
\ No newline at end of file
diff --git a/test/streaming/manual.md b/test/streaming/manual.md
new file mode 100644
index 0000000000000000000000000000000000000000..a9e6be6eb29dc708afb4ba32cc53a663161525ca
--- /dev/null
+++ b/test/streaming/manual.md
@@ -0,0 +1,140 @@
+<!--
+// © University of Southampton IT Innovation Centre, 2017
+//
+// Copyright in this software belongs to University of Southampton
+// IT Innovation Centre of Gamma House, Enterprise Road, 
+// Chilworth Science Park, Southampton, SO16 7NS, UK.
+//
+// This software may not be used, sold, licensed, transferred, copied
+// or reproduced in whole or in part in any manner or form or in or
+// on any media by any person other than in accordance with the terms
+// of the Licence Agreement supplied with the software, or otherwise
+// without the prior written consent of the copyright owners.
+//
+// This software is distributed WITHOUT ANY WARRANTY, without even the
+// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+// PURPOSE, except where stated in the Licence Agreement supplied with
+// the software.
+//
+//      Created By :            Michael Boniface
+//      Updated By :            Simon Crowle
+//      Created Date :          18-12-2017
+//      Update Date :           14-02-2018
+//      Created for Project :   FLAME
+-->
+
+# CLMC Adaptive Streaming Test
+
+This test streams mpeg-dash video using the two apache servers monitored by Telegraf configured with a default apache plugin and a net_response plugin. The data is stored in the `clmc-service` using database `CLMCMetrics` and measurements `apache` and `net_response`
+
+The following command brings up the services
+
+`vagrant --infra=streaming up`
+
+* clmc-service: configured with influx, kapacitor, chronograf
+* apache1@DC1, apache2@DC2: configured with apache and a test video located at http://192.168.50.11:80/test_video/stream.mpd on the internal vbox network and at http://localhost:8081/test_video/stream.mpd if accessing from the host machine
+
+### Run the test set-up
+
+`vagrant --infra=streaming ssh clmc-service -- "sudo /vagrant/test/streaming/setup.sh /vagrant/test/streaming"`
+
+### Run the automated test
+
+Run the load test using the following command
+
+`vagrant --infra=streaming ssh loadtest-streaming -- "/vagrant/test/streaming/run.sh"`
+
+This test currently just generates the load and does not have any assertions. It breaks at 1000.
+
+
+### Manual test 
+
+And then point your browser to:
+
+`http://localhost:8888`
+
+## Manual set-up of Chronograf's CLMC data source
+
+If you __do not__ want to run the automatic set-up, basic entry to the Chronograf dashboard is as follows:
+
+1. Point your browser to: [http://localhost:8888](http://localhost:8888)
+2. Enter your connection string: `http://localhost:8086`
+3. Enter the Name: `Influx 1`
+4. Enter the Telegraf database: `CLMCMetrics`
+
+## Manual test on Windows
+
+### View the video
+Install the VLC video client on the host machine; you must use a very recent version, otherwise the MPD file cannot be read. At the time of writing, the following nightly build was installed:
+
+https://nightlies.videolan.org/build/win32/vlc-3.0.0-rc1-20171201-0326/vlc-3.0.0-20171201-0326-rc1-win32.exe
+
+Start the VLC Player
+
+`Media->Open Network Stream`
+
+The test video is the FLAME project video and it can be viewed at the following location.
+
+`Enter the network URL: http://localhost:8081/test_video/stream.mpd for the apache1 server`
+
+The video should play.
+
+### Query the data
+
+Open Chronograf by entering the following URL into a browser on the host http://localhost:8888. Your CLMC data source, Kapacitor and demonstration dashboard should be ready for you to explore.
+
+Press the Data Explorer in the menu and select the apache measurement and create a query such as 
+
+`SELECT mean("BytesPerSec") AS "mean_BytesPerSec" FROM "CLMCMetrics"."autogen"."apache" WHERE time > now() - 5m GROUP BY time(10s)`
+
+## KPI triggers
+
+In this demonstrator an example KPI rule has been set up in Kapacitor which fires when the average requests per second on the Apache 1 or Apache 2 server goes above certain thresholds (a 'warning' at 0.2 requests/second and a 'critical' message at 0.5 requests/second). The TICKscript specification for this rule is as follows:
+
+```
+dbrp "CLMCMetrics"."autogen"
+
+// Apache 1 rule
+// -------------
+var a1Data = batch
+    |query(''' SELECT mean("ReqPerSec") AS "mean_RPS" FROM "CLMCMetrics"."autogen"."apache" WHERE "ipendpoint"='adaptive_streaming_I1_apache1' ''')
+        .period(5s)
+        .every(5s)
+
+var a1Alert = a1Data
+    |alert()
+        .id('{{ .Name }}/adaptive_streaming_I1_apache1')
+        .message('{{ .ID }} is {{ .Level }} Mean Requests Per Second: {{ index .Fields "mean_RPS" }}')
+        .warn(lambda: "mean_RPS" > 0.2)
+        .crit(lambda: "mean_RPS" > 0.5)
+        .slack()
+        .log( '/tmp/RPSLoad.log' )
+
+// Apache 2 rule
+// -------------
+var a2Data = batch
+    |query(''' SELECT mean("ReqPerSec") AS "mean_RPS" FROM "CLMCMetrics"."autogen"."apache" WHERE "ipendpoint"='adaptive_streaming_I1_apache2' ''')
+        .period(5s)
+        .every(5s)
+
+var a2Alert = a2Data
+    |alert()
+        .id('{{ .Name }}/adaptive_streaming_I1_apache2')
+        .message('{{ .ID }} is {{ .Level }} Mean Requests Per Second: {{ index .Fields "mean_RPS" }}')
+        .warn(lambda: "mean_RPS" > 0.2)
+        .crit(lambda: "mean_RPS" > 0.5)
+        .slack()
+        .log( '/tmp/RPSLoad.log' )
+```
+
+Alerts are sent both to an internal log within the CLMC service file system and to a FLAME demo Slack service:
+
+https://flamedemo-itinnov.slack.com
+
+Alerts can be found under the '#clmc' channel. 
+
+### Kapacitor rules in Chronograf's GUI
+
+Additional rules can be added to this demonstrator either via the Chronograf GUI (see [here](https://docs.influxdata.com/chronograf/v1.4/introduction/getting-started/#4-connect-chronograf-to-kapacitor) for more information) or by using the Kapacitor HTTP API and TICKscript (for an introduction, [look here](https://docs.influxdata.com/kapacitor/v1.4/tick/)).
+ 
+
diff --git a/test/streaming/rules.json b/test/streaming/rules.json
new file mode 100644
index 0000000000000000000000000000000000000000..f2406fae085d072b80f513a5fab5dc9a53a19c5c
--- /dev/null
+++ b/test/streaming/rules.json
@@ -0,0 +1,9 @@
+{
+    "id" : "Request_Rate_Alert_ApacheServers",
+    "type" : "batch",
+    "dbrps" : [{"db": "CLMCMetrics", "rp" : "autogen"}],
+    
+    "script" : "\/\/ Apache 1 rule\r\n\/\/ -------------\r\nvar a1Data = batch\r\n    |query(''' SELECT mean(\"ReqPerSec\") AS \"mean_RPS\" FROM \"CLMCMetrics\".\"autogen\".\"apache\" WHERE \"ipendpoint\"='adaptive_streaming_I1_apache1' ''')\r\n        .period(5s)\r\n        .every(5s)\r\n\r\nvar a1Alert = a1Data\r\n    |alert()\r\n        .id('{{ .Name }}\/adaptive_streaming_I1_apache1')\r\n        .message('{{ .ID }} is {{ .Level }} Mean Requests Per Second: {{ index .Fields \"mean_RPS\" }}')\r\n        .warn(lambda: \"mean_RPS\" > 0.2)\r\n        .crit(lambda: \"mean_RPS\" > 0.5)\r\n        .slack()\r\n        .log( '\/tmp\/RPSLoad.log' )\r\n\r\n\/\/ Apache 2 rule\r\n\/\/ -------------\r\nvar a2Data = batch\r\n    |query(''' SELECT mean(\"ReqPerSec\") AS \"mean_RPS\" FROM \"CLMCMetrics\".\"autogen\".\"apache\" WHERE \"ipendpoint\"='adaptive_streaming_I1_apache2' ''')\r\n        .period(5s)\r\n        .every(5s)\r\n\r\nvar a2Alert = a2Data\r\n    |alert()\r\n        .id('{{ .Name }}\/adaptive_streaming_I1_apache2')\r\n        .message('{{ .ID }} is {{ .Level }} Mean Requests Per Second: {{ index .Fields \"mean_RPS\" }}')\r\n        .warn(lambda: \"mean_RPS\" > 0.2)\r\n        .crit(lambda: \"mean_RPS\" > 0.5)\r\n        .slack()\r\n        .log( '\/tmp\/RPSLoad.log' )",
+    
+    "status" : "enabled"
+}
\ No newline at end of file
diff --git a/test/streaming/run.sh b/test/streaming/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..96276972ecf3c86bb638c9aa2363aa17b3032095
--- /dev/null
+++ b/test/streaming/run.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+#/////////////////////////////////////////////////////////////////////////
+#//
+#// (c) University of Southampton IT Innovation Centre, 2017
+#//
+#// Copyright in this software belongs to University of Southampton
+#// IT Innovation Centre of Gamma House, Enterprise Road,
+#// Chilworth Science Park, Southampton, SO16 7NS, UK.
+#//
+#// This software may not be used, sold, licensed, transferred, copied
+#// or reproduced in whole or in part in any manner or form or in or
+#// on any media by any person other than in accordance with the terms
+#// of the Licence Agreement supplied with the software, or otherwise
+#// without the prior written consent of the copyright owners.
+#//
+#// This software is distributed WITHOUT ANY WARRANTY, without even the
+#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+#// PURPOSE, except where stated in the Licence Agreement supplied with
+#// the software.
+#//
+#//      Created By :            Michael Boniface
+#//      Created Date :          15/02/2017
+#//      Created for Project :   FLAME
+#//
+#/////////////////////////////////////////////////////////////////////////
+
+# create test directories
+testfolder=$(date +%Y%m%d%H%M%S)
+testrundir="/vagrant/test/run"
+testdir=$testrundir"/streaming/"$testfolder
+echo "Test directory: "$testdir
+mkdir -p  "$testdir"
+
+# run testplan
+cd $testdir
+
+jmeter -n -LDEBUG -t /vagrant/test/streaming/testplan.jmx -l results.jtx -j jmeter.log
+
+# quick bash equivalent in case Jmeter fails
+#COUNTER=0
+#while [  $COUNTER -lt 1 ]; do
+#  cvlc -Vdummy --no-audio http://192.168.50.11/test_video/stream.mpd &
+#  sleep 1
+#  let COUNTER=COUNTER+1 
+#done
+
+
+
diff --git a/test/streaming/setup.sh b/test/streaming/setup.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7490d60d3f87fc385141594e60014d5c4cb8b403
--- /dev/null
+++ b/test/streaming/setup.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+#/////////////////////////////////////////////////////////////////////////
+#//
+#// (c) University of Southampton IT Innovation Centre, 2018
+#//
+#// Copyright in this software belongs to University of Southampton
+#// IT Innovation Centre of Gamma House, Enterprise Road,
+#// Chilworth Science Park, Southampton, SO16 7NS, UK.
+#//
+#// This software may not be used, sold, licensed, transferred, copied
+#// or reproduced in whole or in part in any manner or form or in or
+#// on any media by any person other than in accordance with the terms
+#// of the Licence Agreement supplied with the software, or otherwise
+#// without the prior written consent of the copyright owners.
+#//
+#// This software is distributed WITHOUT ANY WARRANTY, without even the
+#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+#// PURPOSE, except where stated in the Licence Agreement supplied with
+#// the software.
+#//
+#//      Created By :            Simon Crowle
+#//      Created Date :          14/02/2018
+#//      Created for Project :   FLAME
+#//
+#/////////////////////////////////////////////////////////////////////////
+
+TEST_DIR=$1
+
+# copy Kapacitor conf to /etc/kapacitor and restart
+
+systemctl stop kapacitor
+echo $TEST_DIR"/kapacitor.conf"
+cp $TEST_DIR/kapacitor.conf /etc/kapacitor/kapacitor.conf
+systemctl start kapacitor
+
+# Set up Influx data source
+curl -i -X POST -H "Content-Type: application/json" http://localhost:8888/chronograf/v1/sources -d @$TEST_DIR/influx.json
+
+# Set up Kapacitor
+curl -i -X POST -H "Content-Type: application/json" http://localhost:8888/chronograf/v1/sources/1/kapacitors -d @$TEST_DIR/kapacitor.json
+
+# Set up rules
+curl -i -X POST -H "Content-Type: application/json" http://localhost:9092/kapacitor/v1/tasks -d @$TEST_DIR/rules.json
+
+# Set up dashboard
+curl -i -X POST -H "Content-Type: application/json" http://localhost:8888/chronograf/v1/dashboards -d @$TEST_DIR/dashboard.json
\ No newline at end of file
diff --git a/test/streaming/stop.sh b/test/streaming/stop.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b9953899d5d08b3ead91d60438532524e5a19525
--- /dev/null
+++ b/test/streaming/stop.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+for pid in $(ps -ef | grep "/usr/bin/vlc" | awk '{print $2}'); do kill -9 $pid; done
\ No newline at end of file
diff --git a/test/streaming/testplan.jmx b/test/streaming/testplan.jmx
new file mode 100644
index 0000000000000000000000000000000000000000..065e14c0f91203bf0d6a4a80a751bbf0b89a502c
--- /dev/null
+++ b/test/streaming/testplan.jmx
@@ -0,0 +1,67 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<jmeterTestPlan version="1.2" properties="3.2" jmeter="3.3 r1808647">
+  <hashTree>
+    <TestPlan guiclass="TestPlanGui" testclass="TestPlan" testname="Test Plan" enabled="true">
+      <stringProp name="TestPlan.comments"></stringProp>
+      <boolProp name="TestPlan.functional_mode">false</boolProp>
+      <boolProp name="TestPlan.serialize_threadgroups">false</boolProp>
+      <elementProp name="TestPlan.user_defined_variables" elementType="Arguments" guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+        <collectionProp name="Arguments.arguments"/>
+      </elementProp>
+      <stringProp name="TestPlan.user_define_classpath"></stringProp>
+    </TestPlan>
+    <hashTree>
+      <ThreadGroup guiclass="ThreadGroupGui" testclass="ThreadGroup" testname="StreamingGroupApache1" enabled="true">
+        <stringProp name="ThreadGroup.on_sample_error">continue</stringProp>
+        <elementProp name="ThreadGroup.main_controller" elementType="LoopController" guiclass="LoopControlPanel" testclass="LoopController" testname="Loop Controller" enabled="true">
+          <boolProp name="LoopController.continue_forever">false</boolProp>
+          <intProp name="LoopController.loops">-1</intProp>
+        </elementProp>
+        <stringProp name="ThreadGroup.num_threads">50</stringProp>
+        <stringProp name="ThreadGroup.ramp_time">1</stringProp>
+        <longProp name="ThreadGroup.start_time">1518691643000</longProp>
+        <longProp name="ThreadGroup.end_time">1518691643000</longProp>
+        <boolProp name="ThreadGroup.scheduler">true</boolProp>
+        <stringProp name="ThreadGroup.duration">20</stringProp>
+        <stringProp name="ThreadGroup.delay">0</stringProp>
+      </ThreadGroup>
+      <hashTree>
+        <SystemSampler guiclass="SystemSamplerGui" testclass="SystemSampler" testname="VLC Client" enabled="true">
+          <boolProp name="SystemSampler.checkReturnCode">false</boolProp>
+          <stringProp name="SystemSampler.expectedReturnCode">0</stringProp>
+          <stringProp name="SystemSampler.command">cvlc</stringProp>
+          <elementProp name="SystemSampler.arguments" elementType="Arguments" guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+            <collectionProp name="Arguments.arguments">
+              <elementProp name="" elementType="Argument">
+                <stringProp name="Argument.name"></stringProp>
+                <stringProp name="Argument.value">-Vdummy</stringProp>
+                <stringProp name="Argument.metadata">=</stringProp>
+              </elementProp>
+              <elementProp name="" elementType="Argument">
+                <stringProp name="Argument.name"></stringProp>
+                <stringProp name="Argument.value">--no-audio</stringProp>
+                <stringProp name="Argument.metadata">=</stringProp>
+              </elementProp>
+              <elementProp name="" elementType="Argument">
+                <stringProp name="Argument.name"></stringProp>
+                <stringProp name="Argument.value">http://192.168.50.11/test_video/stream.mpd</stringProp>
+                <stringProp name="Argument.metadata">=</stringProp>
+              </elementProp>
+            </collectionProp>
+          </elementProp>
+          <elementProp name="SystemSampler.environment" elementType="Arguments" guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+            <collectionProp name="Arguments.arguments"/>
+          </elementProp>
+          <stringProp name="SystemSampler.directory"></stringProp>
+          <stringProp name="SystemSampler.stdout">stdout${__threadNum}</stringProp>
+          <longProp name="SystemSampler.timeout">20000</longProp>
+        </SystemSampler>
+        <hashTree/>
+      </hashTree>
+    </hashTree>
+    <WorkBench guiclass="WorkBenchGui" testclass="WorkBench" testname="WorkBench" enabled="true">
+      <boolProp name="WorkBench.save">true</boolProp>
+    </WorkBench>
+    <hashTree/>
+  </hashTree>
+</jmeterTestPlan>