diff --git a/test/services/apache/install-apache.sh b/test/services/apache/install-apache.sh
new file mode 100644
index 0000000000000000000000000000000000000000..735fc0a46e4dbe491ce82edba7b5aeb17d84c005
--- /dev/null
+++ b/test/services/apache/install-apache.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+#/////////////////////////////////////////////////////////////////////////
+#//
+#// (c) University of Southampton IT Innovation Centre, 2017
+#//
+#// Copyright in this software belongs to University of Southampton
+#// IT Innovation Centre of Gamma House, Enterprise Road,
+#// Chilworth Science Park, Southampton, SO16 7NS, UK.
+#//
+#// This software may not be used, sold, licensed, transferred, copied
+#// or reproduced in whole or in part in any manner or form or in or
+#// on any media by any person other than in accordance with the terms
+#// of the Licence Agreement supplied with the software, or otherwise
+#// without the prior written consent of the copyright owners.
+#//
+#// This software is distributed WITHOUT ANY WARRANTY, without even the
+#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+#// PURPOSE, except where stated in the Licence Agreement supplied with
+#// the software.
+#//
+#//      Created By :            Michael Boniface
+#//      Created Date :          23/01/2018
+#//      Created for Project :   FLAME
+#//
+#/////////////////////////////////////////////////////////////////////////
+
+# Install apache
+sudo apt-get update
+sudo apt-get -y install apache2
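+
+# The telegraf_apache_template.conf in this directory scrapes
+# http://localhost/server-status?auto, which requires mod_status.
+# A minimal sketch to enable the module and check the endpoint
+# (on the stock Ubuntu apache2 package mod_status is usually on already):
+sudo a2enmod status
+sudo systemctl restart apache2
+curl -s http://localhost/server-status?auto | head -n 5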
\ No newline at end of file
diff --git a/test/services/apache/telegraf_apache_template.conf b/test/services/apache/telegraf_apache_template.conf
new file mode 100644
index 0000000000000000000000000000000000000000..99f334996bd9f67a4465cd27950978a2803d69d4
--- /dev/null
+++ b/test/services/apache/telegraf_apache_template.conf
@@ -0,0 +1,133 @@
+# Telegraf configuration
+
+# Telegraf is entirely plugin driven. All metrics are gathered from the
+# declared inputs, and sent to the declared outputs.
+
+# Plugins must be declared in here to be active.
+# To deactivate a plugin, comment out the name and any variables.
+
+# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
+# file would generate.
+
+# Global tags can be specified here in key="value" format.
+[global_tags]
+  # location of the data centre
+  location="{{LOCATION}}"
+  # media service template id
+  sfc="{{SFC_ID}}"
+  # media service instance
+  sfc_i="{{SFC_ID_INSTANCE}}"
+  # service function type
+  sf="{{SF_ID}}"
+  # service function instance id
+  sf_i="{{SF_ID_INSTANCE}}"
+  # ipendpoint id aka surrogate instance
+  ipendpoint="{{IP_ENDPOINT_ID}}"
+
+# Configuration for telegraf agent
+[agent]
+  ## Default data collection interval for all inputs
+  interval = "10s"
+  ## Rounds collection interval to 'interval'
+  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
+  round_interval = true
+
+  ## Telegraf will cache metric_buffer_limit metrics for each output, and will
+  ## flush this buffer on a successful write.
+  metric_buffer_limit = 1000
+  ## Flush the buffer whenever full, regardless of flush_interval.
+  flush_buffer_when_full = true
+
+  ## Collection jitter is used to jitter the collection by a random amount.
+  ## Each plugin will sleep for a random time within jitter before collecting.
+  ## This can be used to avoid many plugins querying things like sysfs at the
+  ## same time, which can have a measurable effect on the system.
+  collection_jitter = "0s"
+
+  ## Default flushing interval for all outputs. You shouldn't set this below
+  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
+  flush_interval = "10s"
+  ## Jitter the flush interval by a random amount. This is primarily to avoid
+  ## large write spikes for users running a large number of telegraf instances.
+  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
+  flush_jitter = "0s"
+
+  ## Logging configuration:
+  ## Run telegraf in debug mode
+  debug = false
+  ## Run telegraf in quiet mode
+  quiet = false
+  ## Specify the log file name. The empty string means to log to stdout.
+  logfile = "G:/Telegraf/telegraf.log"
+
+  ## Override default hostname, if empty use os.Hostname()
+  hostname = ""
+
+
+###############################################################################
+#                                  OUTPUTS                                    #
+###############################################################################
+
+# Configuration for influxdb server to send metrics to
+[[outputs.influxdb]]
+  # The full HTTP or UDP endpoint URL for your InfluxDB instance.
+  # Multiple urls can be specified but it is assumed that they are part of the same
+  # cluster; this means that only ONE of the urls will be written to each interval.
+  # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
+  urls = ["{{INFLUXDB_URL}}"] # required
+  # The target database for metrics (telegraf will create it if not exists)
+  database = "{{DATABASE_NAME}}" # required
+  # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+  # note: using second precision greatly helps InfluxDB compression
+  precision = "s"
+
+  ## Write timeout (for the InfluxDB client), formatted as a string.
+  ## If not provided, will default to 5s. 0s means no timeout (not recommended).
+  timeout = "5s"
+  # username = "telegraf"
+  # password = "metricsmetricsmetricsmetrics"
+  # Set the user agent for HTTP POSTs (can be useful for log differentiation)
+  # user_agent = "telegraf"
+  # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
+  # udp_payload = 512
+
+
+###############################################################################
+#                                  INPUTS                                     #
+###############################################################################
+# # Influx HTTP write listener
+[[inputs.http_listener]]
+  ## Address and port to host HTTP listener on
+  service_address = ":8186"
+
+  ## timeouts
+  read_timeout = "10s"
+  write_timeout = "10s"
+
+  ## HTTPS
+  #tls_cert= "/etc/telegraf/cert.pem"
+  #tls_key = "/etc/telegraf/key.pem"
+
+  ## MTLS
+  #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+
+# Read Apache status information (mod_status)
+[[inputs.apache]]
+  ## An array of URLs to gather from, must be directed at the machine
+  ## readable version of the mod_status page including the auto query string.
+  ## Default is "http://localhost/server-status?auto".
+  urls = ["http://localhost/server-status?auto"]
+
+  ## Credentials for basic HTTP authentication.
+  # username = "myuser"
+  # password = "mypassword"
+
+  ## Maximum time to receive response.
+  # response_timeout = "5s"
+
+  ## Optional SSL Config
+  # ssl_ca = "/etc/telegraf/ca.pem"
+  # ssl_cert = "/etc/telegraf/cert.pem"
+  # ssl_key = "/etc/telegraf/key.pem"
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
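+
+# The {{...}} placeholders above are expected to be substituted before the
+# agent starts. A hypothetical one-liner for two of them (values taken from
+# the streaming-sim defaults; the tag placeholders are handled the same way):
+#   sed -e 's|{{INFLUXDB_URL}}|http://192.168.50.10:8086|' \
+#       -e 's|{{DATABASE_NAME}}|CLMCMetrics|' \
+#       telegraf_apache_template.conf > /etc/telegraf/telegraf.conf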
\ No newline at end of file
diff --git a/test/services/mongo/install-mongo.sh b/test/services/mongo/install-mongo.sh
new file mode 100644
index 0000000000000000000000000000000000000000..25797d14568ba3fbc8b84dec1d2f5e969a861180
--- /dev/null
+++ b/test/services/mongo/install-mongo.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+#/////////////////////////////////////////////////////////////////////////
+#//
+#// (c) University of Southampton IT Innovation Centre, 2017
+#//
+#// Copyright in this software belongs to University of Southampton
+#// IT Innovation Centre of Gamma House, Enterprise Road,
+#// Chilworth Science Park, Southampton, SO16 7NS, UK.
+#//
+#// This software may not be used, sold, licensed, transferred, copied
+#// or reproduced in whole or in part in any manner or form or in or
+#// on any media by any person other than in accordance with the terms
+#// of the Licence Agreement supplied with the software, or otherwise
+#// without the prior written consent of the copyright owners.
+#//
+#// This software is distributed WITHOUT ANY WARRANTY, without even the
+#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+#// PURPOSE, except where stated in the Licence Agreement supplied with
+#// the software.
+#//
+#//      Created By :            Michael Boniface
+#//      Created Date :          23/01/2018
+#//      Created for Project :   FLAME
+#//
+#/////////////////////////////////////////////////////////////////////////
+
+# Install mongo
+sudo apt-get update
+sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 2930ADAE8CAF5059EE73BB4B58712A2291FA4AD5
+echo "deb [ arch=amd64,arm64 ] https://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.6 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.6.list
+sudo apt-get update
+sudo apt-get install -y mongodb-org
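+
+# The telegraf_mongo_template.conf in this directory expects mongod to be
+# listening on 127.0.0.1:27017; the package may not start it automatically
+# (assumes systemd; use "sudo service mongod start" otherwise)
+sudo systemctl start mongod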
diff --git a/test/services/mongo/telegraf_mongo_template.conf b/test/services/mongo/telegraf_mongo_template.conf
new file mode 100644
index 0000000000000000000000000000000000000000..e65c22f60894f586a0da06038b085885235aba63
--- /dev/null
+++ b/test/services/mongo/telegraf_mongo_template.conf
@@ -0,0 +1,128 @@
+# Telegraf configuration
+
+# Telegraf is entirely plugin driven. All metrics are gathered from the
+# declared inputs, and sent to the declared outputs.
+
+# Plugins must be declared in here to be active.
+# To deactivate a plugin, comment out the name and any variables.
+
+# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
+# file would generate.
+
+# Global tags can be specified here in key="value" format.
+[global_tags]
+  # location of the data centre
+  location="{{LOCATION}}"
+  # media service template id
+  sfc="{{SFC_ID}}"
+  # media service instance
+  sfc_i="{{SFC_ID_INSTANCE}}"
+  # service function type
+  sf="{{SF_ID}}"
+  # service function instance id
+  sf_i="{{SF_ID_INSTANCE}}"
+  # ipendpoint id aka surrogate instance
+  ipendpoint="{{IP_ENDPOINT_ID}}"
+
+# Configuration for telegraf agent
+[agent]
+  ## Default data collection interval for all inputs
+  interval = "10s"
+  ## Rounds collection interval to 'interval'
+  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
+  round_interval = true
+
+  ## Telegraf will cache metric_buffer_limit metrics for each output, and will
+  ## flush this buffer on a successful write.
+  metric_buffer_limit = 1000
+  ## Flush the buffer whenever full, regardless of flush_interval.
+  flush_buffer_when_full = true
+
+  ## Collection jitter is used to jitter the collection by a random amount.
+  ## Each plugin will sleep for a random time within jitter before collecting.
+  ## This can be used to avoid many plugins querying things like sysfs at the
+  ## same time, which can have a measurable effect on the system.
+  collection_jitter = "0s"
+
+  ## Default flushing interval for all outputs. You shouldn't set this below
+  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
+  flush_interval = "10s"
+  ## Jitter the flush interval by a random amount. This is primarily to avoid
+  ## large write spikes for users running a large number of telegraf instances.
+  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
+  flush_jitter = "0s"
+
+  ## Logging configuration:
+  ## Run telegraf in debug mode
+  debug = false
+  ## Run telegraf in quiet mode
+  quiet = false
+  ## Specify the log file name. The empty string means to log to stdout.
+  logfile = "G:/Telegraf/telegraf.log"
+
+  ## Override default hostname, if empty use os.Hostname()
+  hostname = ""
+
+
+###############################################################################
+#                                  OUTPUTS                                    #
+###############################################################################
+
+# Configuration for influxdb server to send metrics to
+[[outputs.influxdb]]
+  # The full HTTP or UDP endpoint URL for your InfluxDB instance.
+  # Multiple urls can be specified but it is assumed that they are part of the same
+  # cluster; this means that only ONE of the urls will be written to each interval.
+  # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
+  urls = ["{{INFLUXDB_URL}}"] # required
+  # The target database for metrics (telegraf will create it if not exists)
+  database = "{{DATABASE_NAME}}" # required
+  # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+  # note: using second precision greatly helps InfluxDB compression
+  precision = "s"
+
+  ## Write timeout (for the InfluxDB client), formatted as a string.
+  ## If not provided, will default to 5s. 0s means no timeout (not recommended).
+  timeout = "5s"
+  # username = "telegraf"
+  # password = "metricsmetricsmetricsmetrics"
+  # Set the user agent for HTTP POSTs (can be useful for log differentiation)
+  # user_agent = "telegraf"
+  # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
+  # udp_payload = 512
+
+
+###############################################################################
+#                                  INPUTS                                     #
+###############################################################################
+# # Influx HTTP write listener
+[[inputs.http_listener]]
+  ## Address and port to host HTTP listener on
+  service_address = ":8186"
+
+  ## timeouts
+  read_timeout = "10s"
+  write_timeout = "10s"
+
+  ## HTTPS
+  #tls_cert= "/etc/telegraf/cert.pem"
+  #tls_key = "/etc/telegraf/key.pem"
+
+  ## MTLS
+  #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+
+[[inputs.mongodb]]
+  ## An array of URLs of the form:
+  ##   "mongodb://" [user ":" pass "@"] host [ ":" port]
+  ## For example:
+  ##   mongodb://user:auth_key@10.10.3.30:27017,
+  ##   mongodb://10.10.3.33:18832,
+  servers = ["mongodb://127.0.0.1:27017"]
+  gather_perdb_stats = false
+
+  ## Optional SSL Config
+  # ssl_ca = "/etc/telegraf/ca.pem"
+  # ssl_cert = "/etc/telegraf/cert.pem"
+  # ssl_key = "/etc/telegraf/key.pem"
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
\ No newline at end of file
diff --git a/test/services/nginx/install-nginx.sh b/test/services/nginx/install-nginx.sh
new file mode 100644
index 0000000000000000000000000000000000000000..5f44ce11d0a7fdf02e9c6d3cc306c9939263452a
--- /dev/null
+++ b/test/services/nginx/install-nginx.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+#/////////////////////////////////////////////////////////////////////////
+#//
+#// (c) University of Southampton IT Innovation Centre, 2017
+#//
+#// Copyright in this software belongs to University of Southampton
+#// IT Innovation Centre of Gamma House, Enterprise Road,
+#// Chilworth Science Park, Southampton, SO16 7NS, UK.
+#//
+#// This software may not be used, sold, licensed, transferred, copied
+#// or reproduced in whole or in part in any manner or form or in or
+#// on any media by any person other than in accordance with the terms
+#// of the Licence Agreement supplied with the software, or otherwise
+#// without the prior written consent of the copyright owners.
+#//
+#// This software is distributed WITHOUT ANY WARRANTY, without even the
+#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+#// PURPOSE, except where stated in the Licence Agreement supplied with
+#// the software.
+#//
+#//      Created By :            Michael Boniface
+#//      Created Date :          01/02/2018
+#//      Created for Project :   FLAME
+#//
+#/////////////////////////////////////////////////////////////////////////
+
+# Install nginx
+sudo apt-get update
+sudo apt-get -y install nginx
+
+# Need to set up basic stats as this is not configured by default
+# http://nginx.org/en/docs/http/ngx_http_stub_status_module.html
+
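+# A minimal sketch of a stub_status server block (assumes the stock Ubuntu
+# layout and the /server_status path used by telegraf_nginx_template.conf):
+sudo tee /etc/nginx/conf.d/status.conf > /dev/null <<'EOF'
+server {
+    listen 80;
+    server_name localhost;
+    location /server_status {
+        stub_status on;
+        access_log off;
+        allow 127.0.0.1;
+        deny all;
+    }
+}
+EOF
+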
+# start NGINX
+sudo systemctl start nginx
+
+# reload so the stub_status configuration above is picked up
+sudo nginx -s reload
\ No newline at end of file
diff --git a/test/services/nginx/telegraf_nginx_template.conf b/test/services/nginx/telegraf_nginx_template.conf
new file mode 100644
index 0000000000000000000000000000000000000000..31c97d16ee761205e56d77adba38f74d950705bb
--- /dev/null
+++ b/test/services/nginx/telegraf_nginx_template.conf
@@ -0,0 +1,127 @@
+# Telegraf configuration
+
+# Telegraf is entirely plugin driven. All metrics are gathered from the
+# declared inputs, and sent to the declared outputs.
+
+# Plugins must be declared in here to be active.
+# To deactivate a plugin, comment out the name and any variables.
+
+# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
+# file would generate.
+
+# Global tags can be specified here in key="value" format.
+[global_tags]
+  # location of the data centre
+  location="{{LOCATION}}"
+  # media service template id
+  sfc="{{SFC_ID}}"
+  # media service instance
+  sfc_i="{{SFC_ID_INSTANCE}}"
+  # service function type
+  sf="{{SF_ID}}"
+  # service function instance id
+  sf_i="{{SF_ID_INSTANCE}}"
+  # ipendpoint id aka surrogate instance
+  ipendpoint="{{IP_ENDPOINT_ID}}"
+
+# Configuration for telegraf agent
+[agent]
+  ## Default data collection interval for all inputs
+  interval = "10s"
+  ## Rounds collection interval to 'interval'
+  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
+  round_interval = true
+
+  ## Telegraf will cache metric_buffer_limit metrics for each output, and will
+  ## flush this buffer on a successful write.
+  metric_buffer_limit = 1000
+  ## Flush the buffer whenever full, regardless of flush_interval.
+  flush_buffer_when_full = true
+
+  ## Collection jitter is used to jitter the collection by a random amount.
+  ## Each plugin will sleep for a random time within jitter before collecting.
+  ## This can be used to avoid many plugins querying things like sysfs at the
+  ## same time, which can have a measurable effect on the system.
+  collection_jitter = "0s"
+
+  ## Default flushing interval for all outputs. You shouldn't set this below
+  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
+  flush_interval = "10s"
+  ## Jitter the flush interval by a random amount. This is primarily to avoid
+  ## large write spikes for users running a large number of telegraf instances.
+  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
+  flush_jitter = "0s"
+
+  ## Logging configuration:
+  ## Run telegraf in debug mode
+  debug = false
+  ## Run telegraf in quiet mode
+  quiet = false
+  ## Specify the log file name. The empty string means to log to stdout.
+  logfile = "G:/Telegraf/telegraf.log"
+
+  ## Override default hostname, if empty use os.Hostname()
+  hostname = ""
+
+
+###############################################################################
+#                                  OUTPUTS                                    #
+###############################################################################
+
+# Configuration for influxdb server to send metrics to
+[[outputs.influxdb]]
+  # The full HTTP or UDP endpoint URL for your InfluxDB instance.
+  # Multiple urls can be specified but it is assumed that they are part of the same
+  # cluster, this means that only ONE of the urls will be written to each interval.
+  # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
+  urls = ["{{INFLUXDB_URL}}"] # required
+  # The target database for metrics (telegraf will create it if not exists)
+  database = "{{DATABASE_NAME}}" # required
+  # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+  # note: using second precision greatly helps InfluxDB compression
+  precision = "s"
+
+  ## Write timeout (for the InfluxDB client), formatted as a string.
+  ## If not provided, will default to 5s. 0s means no timeout (not recommended).
+  timeout = "5s"
+  # username = "telegraf"
+  # password = "metricsmetricsmetricsmetrics"
+  # Set the user agent for HTTP POSTs (can be useful for log differentiation)
+  # user_agent = "telegraf"
+  # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
+  # udp_payload = 512
+
+
+###############################################################################
+#                                  INPUTS                                     #
+###############################################################################
+# # Influx HTTP write listener
+[[inputs.http_listener]]
+  ## Address and port to host HTTP listener on
+  service_address = ":8186"
+
+  ## timeouts
+  read_timeout = "10s"
+  write_timeout = "10s"
+
+  ## HTTPS
+  #tls_cert= "/etc/telegraf/cert.pem"
+  #tls_key = "/etc/telegraf/key.pem"
+
+  ## MTLS
+  #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+
+# Read Nginx's basic status information (ngx_http_stub_status_module)
+[[inputs.nginx]]
+  ## An array of Nginx stub_status URI to gather stats.
+  urls = ["http://localhost/server_status"]
+
+  ## Optional SSL Config
+  # ssl_ca = "/etc/telegraf/ca.pem"
+  # ssl_cert = "/etc/telegraf/cert.pem"
+  # ssl_key = "/etc/telegraf/key.pem"
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
+
+  ## HTTP response timeout (default: 5s)
+  response_timeout = "5s"
\ No newline at end of file
diff --git a/test/streaming-sim/LineProtocolGenerator.py b/test/streaming-sim/LineProtocolGenerator.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d4b07736b3fa3b318754d411aaeb1d91aa2f537
--- /dev/null
+++ b/test/streaming-sim/LineProtocolGenerator.py
@@ -0,0 +1,307 @@
+# line protocol
+
+# Method to create a full InfluxDB request statement (based on partial statement from client)
+import uuid
+from random import random, randint
+
+
+# Reports TX and RX, scaling on requested quality
+def generate_network_report(received_bytes, sent_bytes, time):
+    # Measurement
+    result = 'net_port_io'
+    # Tags
+    result += ',port_id=enps03 '
+    # Fields
+    result += 'RX_BYTES_PORT_M=' + str(received_bytes) + ","
+    result += 'TX_BYTES_PORT_M=' + str(sent_bytes)
+    # Timestamp
+    result += ' ' + str(_getNSTime(time))
+
+    # Measurement
+    #print(result)
+    return result
+
+
+# Formats VM config
+def generate_vm_config(state, cpu, mem, storage, time):
+    # metric
+    result = 'vm_res_alloc'
+    # Tags
+    result += ',vm_state=' + quote_wrap(state)
+    result += ' '
+    # Fields
+    result += 'cpu=' + str(cpu)
+    result += ',memory=' + quote_wrap(mem)
+    result += ',storage=' + quote_wrap(storage)
+
+    # Time
+    result += ' ' + str(_getNSTime(time))
+
+    print(result)
+    return result
+
+
+# Reports cpu usage, scaling on requests
+def generate_cpu_report(cpu_usage, cpu_active_time, cpu_idle_time, time):
+    result = 'cpu_usage'
+    # Tag
+    result += ' '
+    # field
+    result += 'cpu_usage='+str(cpu_usage)
+    result += ',cpu_active_time='+str(cpu_active_time)
+    result += ',cpu_idle_time='+str(cpu_idle_time)
+    result += ' '
+    # Time
+    result += str(_getNSTime(time))
+    print(result)
+    return result
+
+
+# Reports response times, scaling on number of requests
+def generate_mpegdash_report(resource, requests, avg_response_time, peak_response_time, time):
+    # Measurement
+    result = 'mpegdash_service'
+    # Tags
+    result += ',cont_nav=\"' + str(resource) + "\" "
+    # Fields
+
+    # result += 'cont_rep=' + str(quality) + ','
+    result += 'requests=' + str(requests) + ','
+    result += 'avg_response_time=' + str(avg_response_time) + ','
+    result += 'peak_response_time=' + str(peak_response_time)
+    # Timestamp
+    result += ' ' + str(_getNSTime(time))
+    print(result)
+    return result
+
+#ipendpoint_route,ipendpoint_id,cont_nav=FQDN HTTP_REQUESTS_FQDN_M, NETWORK_FQDN_LATENCY timestamp
+def generate_ipendpoint_route(resource, requests, latency, time):
+    # Measurement
+    result = 'ipendpoint_route'
+    # Tags
+    result += ',cont_nav=\"' + str(resource) + "\" "
+    # Fields
+
+    # result += 'cont_rep=' + str(quality) + ','
+    result += 'http_requests_fqdn_m=' + str(requests) + ','
+    result += 'network_fqdn_latency=' + str(latency)
+    # Timestamp
+    result += ' ' + str(_getNSTime(time))
+    #print(result)
+    return result
+
+# InfluxDB requires string field values to be quoted; this provides a utility
+# interface to do this
+def quote_wrap(value):
+    return "\"" + value + "\""
+
+
+# InfluxDB likes to have time-stamps in nanoseconds
+def _getNSTime(time):
+    # Convert to nano-seconds
+    timestamp = int(1000000000*time)
+    #print("timestamp", timestamp)
+    return timestamp
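+
+# Example of the statement shape these helpers produce (a sketch with
+# illustrative values; the timestamp is epoch time in nanoseconds):
+#   >>> generate_network_report(1024, 2048, 1514815200.0)
+#   'net_port_io,port_id=enps03 RX_BYTES_PORT_M=1024,TX_BYTES_PORT_M=2048 1514815200000000000'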
+
+# DEPRECATED
+# ____________________________________________________________________________
+
+# DEPRECATED: old structure, not part of new spec
+def _generateClientRequest(cReq, id, time):
+    # Tags first
+    result = 'sid="' + str(id) + '",' + cReq
+
+    # Fields
+    # No additional fields here yet
+
+    # Timestamp
+    result += ' ' + str(_getNSTime(time))
+
+    # Measurement
+    return 'request,' + result
+
+
+# Method to create a full InfluxDB response statement
+# DEPRECATED: old structure, not part of new spec
+def _generateServerResponse(reqID, quality, time, cpuUsage, qualityDifference):
+    # Tags first
+    result = ' '
+
+    # Fields
+    result += 'quality=' + str(quality) + ','
+    result += 'cpuUsage=' + str(cpuUsage) + ','
+    result += 'qualityDifference=' + str(qualityDifference) + ','
+    result += 'requestID="' + str(reqID) + '",'
+    result += 'index="' + str(uuid.uuid4()) + '"'
+
+    # Timestamp
+    result += ' ' + str(_getNSTime(time))
+
+    # Measurement
+    # print('response'+result)
+    return 'response' + result
+
+
+
+# Formats server config
+def _generateServerConfig(ID, location, cpu, mem, storage, time):
+    # metric
+    result = 'host_resource'
+    # Tags
+    result += ',slice_id=' + quote_wrap(ID)
+    result += ',location=' + quote_wrap(location)
+    result += ' '
+    # Fields
+    result += 'cpu=' + str(cpu)
+    result += ',memory=' + quote_wrap(mem)
+    result += ',storage=' + quote_wrap(storage)
+
+    # Time
+    result += ' ' + str(_getNSTime(time))
+
+    print(result)
+    return result
+
+
+
+# Format port config
+def _configure_port(port_id, state, rate, time):
+    # metric
+    result = 'net_port_config '
+    # Fields
+    result += 'port_id=' + quote_wrap('enps' + port_id)
+    result += ',port_state=' + quote_wrap(state)
+    result += ',tx_constraint=' + quote_wrap(rate)
+    result += ' '
+
+    # Time
+    result += ' ' + str(_getNSTime(time))
+
+    print(result)
+    return result
+
+
+# Format service function config
+def _configure_service_function(state, max_connected_clients):
+    # measurement
+    result = 'mpegdash_service_config'
+    # tags
+    result += ',service_state='+quote_wrap(state)
+    result += ' '
+    # fields
+    result += 'max_connected_clients='+str(max_connected_clients)
+
+    return result
+
+
+
+# Reports memory usage, scaling on requests
+def generate_mem_report(requests, total_mem, time):
+    # Measurement
+    result = 'mem'
+    result += ' '
+    # field
+    used = randint(0, min(100,5*requests))
+    available = 100-used
+    result += 'available_percent='+str(available)
+    result += ',used_percent='+str(used)
+    result += ',total='+str(total_mem)
+    result += ' '
+    # Time
+    result += str(_getNSTime(time))
+    print(result)
+    return result
+
+
+# Formats compute node config
+def generate_compute_node_config(slice_id, location, node_id, cpus, mem, storage, time):
+    # Measurement
+    result = 'compute_node_config'
+    # CommonContext Tag
+    result += ',slice_id='+quote_wrap(slice_id)
+    # Tag
+    result += ',location='+quote_wrap(location)
+    result += ',comp_node_id='+quote_wrap(node_id)
+    result += ' '
+    # field
+    result += 'cpus='+str(cpus)
+    result += ',memory='+str(mem)
+    result += ',storage='+str(storage)
+    result += ' '
+    # Time
+    result += str(_getNSTime(time))
+    print(result)
+    return result
+
+
+# Formats network resource config
+def generate_network_resource_config(slice_id, network_id, bandwidth, time):
+    # Measurement
+    result = 'network_resource_config'
+    # Meta Tag
+    result += ',slice_id='+quote_wrap(slice_id)
+    # Tag
+    result += ',network_id='+quote_wrap(network_id)
+    result += ' '
+    # field
+    result += 'bandwidth='+str(bandwidth)
+    result += ' '
+    # Time
+    result += str(_getNSTime(time))
+    print(result)
+    return result
+
+
+# Formats network interface config
+def generate_network_interface_config(slice_id, comp_node_id, port_id, rx_constraint, tx_constraint, time):
+    # Measurement
+    result = 'network_interface_config'
+    # Meta Tag
+    result += ',slice_id='+quote_wrap(slice_id)
+    # Tags
+    result += ',comp_node_id='+quote_wrap(comp_node_id)
+    result += ',port_id='+quote_wrap(port_id)
+    result += ' '
+    # field
+    result += 'rx_constraint='+str(rx_constraint)
+    result += ',tx_constraint='+str(tx_constraint)
+    result += ' '
+    # Time
+    result += str(_getNSTime(time))
+    print(result)
+    return result
+
+
+# Format SF instance config
+def generate_sf_instance_surrogate_config(loc, sfc, sfc_i, sf_package, sf_i, cpus, mem, storage, time):
+    # Measurement
+    result = 'sf_instance_surrogate_config'
+    # Meta Tag
+    result += ',location='+quote_wrap(loc)
+    result += ',sfc='+quote_wrap(sfc)
+    result += ',sfc_i='+quote_wrap(sfc_i)
+    result += ',sf_package='+quote_wrap(sf_package)
+    result += ',sf_i='+quote_wrap(sf_i)
+    result += ' '
+    # field
+    result += 'cpus='+str(cpus)
+    result += ',memory='+str(mem)
+    result += ',storage='+str(storage)
+    result += ' '
+    # Time
+    result += str(_getNSTime(time))
+    print(result)
+    return result
+
+
+# Formats context container as part of other line protocol generators
+def service_function_measurement(measurement, service_function_context):
+    result = measurement
+    result += ',sfc='+quote_wrap(service_function_context.sfc)
+    result += ',sfc_i='+quote_wrap(service_function_context.sfc_i)
+    result += ',sf_package='+quote_wrap(service_function_context.sf_package)
+    result += ',sf_i='+quote_wrap(service_function_context.sf_i)
+
+    return result
+
+
+
diff --git a/test/streaming-sim/StreamingSim.py b/test/streaming-sim/StreamingSim.py
new file mode 100644
index 0000000000000000000000000000000000000000..0182e75dc99b9e9f28ffad87a0d4d40e5929d67b
--- /dev/null
+++ b/test/streaming-sim/StreamingSim.py
@@ -0,0 +1,203 @@
+import LineProtocolGenerator as lp
+import time
+import urllib.parse
+import urllib.request
+import sys
+import random
+
+# Simulation parameters
+TICK_TIME = 1
+DEFAULT_REQUEST_RATE_INC = 1
+DEFAULT_REQUEST_RATE_INC_PERIOD = 10 
+SIMULATION_TIME_SEC = 60*60
+
+# CLMC parameters
+INFLUX_DB_URL = 'http://192.168.50.10:8086'
+AGENT_URL1 = 'http://192.168.50.11:8186'
+AGENT_URL2 = 'http://192.168.50.12:8186'
+
+# Simulator for services
+class Sim:
+    def __init__(self, influx_url):
+        # the database name is fixed: all simulation metrics go to CLMCMetrics
+        self.influx_db = 'CLMCMetrics'
+        self.influx_url = influx_url
+        # Teardown DB from previous sim and bring it back up
+        self._deleteDB()
+        self._createDB()
+
+
+    def run(self, simulation_length_seconds):
+        start_time = time.time() - simulation_length_seconds  # backdate so generated timestamps fall in the recent past
+        sim_time = start_time
+
+        # segment_size : the length of video requested at a time
+        # bit_rate: MPEG-2 High 1080p 25fps = 80Mbps
+        ip_endpoints = [{'agent_url': AGENT_URL1, 'location': 'DC1', 'cpu': 16,
+                        'mem': '8GB', 'storage': '1TB', 'request_queue': 0, 'request_arrival_rate': 0,
+                        'segment_size': 2, 'video_bit_rate': 80, 'packet_size': 1500},
+                        {'agent_url': AGENT_URL2, 'location': 'DC2', 'cpu': 4, 
+                        'mem': '8GB', 'storage': '1TB', 'request_queue': 0, 'request_arrival_rate': 0, 
+                        'segment_size': 2, 'video_bit_rate': 80, 'packet_size': 1500}
+                        ]
+
+        # Simulate configuration of the ipendpoints
+        # endpoint state -> [mean, stddev/mean] of a normal distribution for the delay in seconds
+        config_delay_dist = {"placing": [10, 0.68], "booting": [10, 0.68], "connecting": [10, 0.68]}
+
+        # Place endpoints
+        max_delay = 0              
+        for ip_endpoint in ip_endpoints:
+            delay_time = self._changeVMState(sim_time, ip_endpoint, config_delay_dist['placing'][0], config_delay_dist['placing'][0]*config_delay_dist['placing'][1], 'placing', 'placed')
+            if delay_time > max_delay:
+                max_delay = delay_time
+        sim_time +=max_delay
+
+        # Boot endpoints
+        max_delay = 0        
+        for ip_endpoint in ip_endpoints:
+            delay_time = self._changeVMState(sim_time, ip_endpoint, config_delay_dist['booting'][0], config_delay_dist['booting'][0]*config_delay_dist['booting'][1], 'booting', 'booted')
+            if delay_time > max_delay:
+                max_delay = delay_time            
+        sim_time +=max_delay
+
+        # Connect endpoints
+        max_delay = 0     
+        for ip_endpoint in ip_endpoints:
+            delay_time = self._changeVMState(sim_time, ip_endpoint, config_delay_dist['connecting'][0], config_delay_dist['connecting'][0]*config_delay_dist['connecting'][1], 'connecting', 'connected')
+            if delay_time > max_delay:
+                max_delay = delay_time
+        sim_time +=max_delay
+   
+        request_arrival_rate_inc = DEFAULT_REQUEST_RATE_INC
+        request_queue = 0
+        inc_period_count = 0
+        for i in range(simulation_length_seconds):        
+            for ip_endpoint in ip_endpoints:
+                request_processing_time = 0
+                cpu_time_available = 0
+                requests_processed = 0
+                max_requests_processed = 0
+                cpu_active_time = 0
+                cpu_idle_time = 0
+                cpu_usage = 0
+                cpu_load_time = 0
+                avg_response_time = 0
+                peak_response_time = 0
+
+                # linear inc to arrival rate
+                if inc_period_count >= DEFAULT_REQUEST_RATE_INC_PERIOD:
+                    ip_endpoint['request_arrival_rate'] += request_arrival_rate_inc
+                    inc_period_count = 0
+                else:
+                    inc_period_count += 1
+                # add new requests to the queue
+                ip_endpoint['request_queue'] += ip_endpoint['request_arrival_rate']
+
+                # time to process one second of video (ms) in the current second
+                request_processing_time = int(random.normalvariate(10, 10*0.68))
+                if request_processing_time <= 10:
+                    request_processing_time = 10
+                # time depends on the length of the segments in seconds
+                request_processing_time *= ip_endpoint['segment_size']
+
+                # amount of cpu time (ms) per tick
+                cpu_time_available = ip_endpoint['cpu']*TICK_TIME*1000
+                max_requests_processed = int(cpu_time_available/request_processing_time)
+                # calc how many requests processed
+                if ip_endpoint['request_queue'] <= max_requests_processed:
+                    # processed all of the requests
+                    requests_processed = ip_endpoint['request_queue']
+                else:
+                    # processed the maximum number of requests
+                    requests_processed = max_requests_processed
+
+                # calculate cpu usage
+                cpu_active_time = int(requests_processed*request_processing_time)
+                cpu_idle_time = int(cpu_time_available-cpu_active_time)
+                cpu_usage = cpu_active_time/cpu_time_available
+                self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_cpu_report(cpu_usage, cpu_active_time, cpu_idle_time, sim_time))
+
+                # calc network usage metrics
+                bytes_rx = 2048*requests_processed           
+                bytes_tx = int(ip_endpoint['video_bit_rate']/8*1000000*requests_processed*ip_endpoint['segment_size'])
+                self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_network_report(bytes_rx, bytes_tx, sim_time))                
+
+                # time to process all of the requests in the queue
+                peak_response_time = ip_endpoint['request_queue']*request_processing_time/ip_endpoint['cpu']
+                # mid-range 
+                avg_response_time = (peak_response_time+request_processing_time)/2
+                self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_mpegdash_report('http://localhost/server-status?auto', ip_endpoint['request_arrival_rate'], avg_response_time, peak_response_time, sim_time))
+
+                # need to calculate this; set at 5ms for now
+                network_request_delay = 0.005
+
+                # calculate network response delays (2km link, 100Mbps)
+                network_response_delay = self._calcNetworkDelay(2000, 100, ip_endpoint['packet_size'], ip_endpoint['video_bit_rate'], ip_endpoint['segment_size'])
+
+                e2e_delay = network_request_delay + (avg_response_time/1000) + network_response_delay
+
+                self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_ipendpoint_route('http://localhost/server-status?auto', ip_endpoint['request_arrival_rate'], e2e_delay, sim_time))
+
+                # remove requests processed off the queue
+                ip_endpoint['request_queue'] -= int(requests_processed)            
+
+            sim_time += TICK_TIME
+        end_time = sim_time
+        print("Simulation Finished. Start time {0}. End time {1}. Total time {2}".format(start_time,end_time,end_time-start_time))
+
+    # distance: metres
+    # bandwidth: Mbps
+    # packet_size: bytes
+    # tx_video_bit_rate: Mbps
+    # segment_size: seconds
+    def _calcNetworkDelay(self, distance, bandwidth, packet_size, tx_video_bit_rate, segment_size):
+        response_delay = 0
+
+        # propagation delay = distance / signal speed (e.g. 2000 metres over 2*10^8 m/s in optical fibre)
+        propagation_delay = distance/(2*100000000)
+        # packetisation delay = IP packet size (bits) / tx rate (e.g. 100Mbps with 0% packet loss)
+        packetisation_delay = (packet_size*8)/(bandwidth*1000000)
+        # print('packetisation_delay:', packetisation_delay)
+        # total number of packets to be sent
+        packets = (tx_video_bit_rate*1000000)/(packet_size*8)
+        # print('packets:', packets)
+        response_delay = packets*(propagation_delay+packetisation_delay)
+        # print('response_delay:', response_delay)
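+        # Worked example with the values run() passes in (illustrative):
+        #   distance=2000 m, bandwidth=100 Mbps, packet_size=1500 B, tx_video_bit_rate=80 Mbps
+        #   propagation_delay = 2000/(2*10^8) = 1e-05 s
+        #   packetisation_delay = (1500*8)/(100*10^6) = 1.2e-04 s
+        #   packets = (80*10^6)/(1500*8) ~= 6667
+        #   response_delay ~= 6667 * 1.3e-04 ~= 0.87 s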
+
+        return response_delay     
+
+    def _changeVMState(self, sim_time, ip_endpoint, mu, sigma, transition_state, next_state):
+        delay_time = 0
+    
+        self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_vm_config(transition_state, ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], sim_time))   
+        
+        delay_time = random.normalvariate(mu, sigma)        
+        
+        self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_vm_config(next_state, ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], sim_time+delay_time))
+
+        return delay_time
+
+    def _createDB(self):
+        self._sendInfluxQuery(self.influx_url, 'CREATE DATABASE ' + self.influx_db)
+
+
+    def _deleteDB(self):
+        self._sendInfluxQuery(self.influx_url, 'DROP DATABASE ' + self.influx_db)
+
+
+    def _sendInfluxQuery(self, url, query):
+        query = urllib.parse.urlencode({'q': query})
+        query = query.encode('ascii')
+        req = urllib.request.Request(url + '/query', query)
+        urllib.request.urlopen(req)
+
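+    # Posts line protocol to the given agent URL; with the telegraf templates
+    # in ../services this reaches the [[inputs.http_listener]] on :8186,
+    # which forwards the points on to InfluxDB.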
+    def _sendInfluxData(self, url, data):
+        data = data.encode()
+        header = {'Content-Type': 'application/octet-stream'}
+        req = urllib.request.Request(url + '/write?db=' + self.influx_db, data, header)
+        urllib.request.urlopen(req)  
+
+simulator = Sim(INFLUX_DB_URL)
+simulator.run(SIMULATION_TIME_SEC)
+