diff --git a/src/mediaServiceSim/LineProtocolGenerator.py b/src/mediaServiceSim/LineProtocolGenerator.py
index da8ba0d10b426582448229b2339040464f3b1589..1f552229d2db58686ce090a0a954395df8e9a8f7 100644
--- a/src/mediaServiceSim/LineProtocolGenerator.py
+++ b/src/mediaServiceSim/LineProtocolGenerator.py
@@ -4,6 +4,95 @@
 import uuid
 from random import random, randint
 
+
+# Reports RX and TX byte counts for a network port
+def generate_network_report(received_bytes, sent_bytes, time):
+    # Measurement
+    result = 'net_port_io'
+    # Tags
+    result += ',port_id=enps03 '
+    # Fields
+    result += 'RX_BYTES_PORT_M=' + str(received_bytes) + ","
+    result += 'TX_BYTES_PORT_M=' + str(sent_bytes)
+    # Timestamp
+    result += ' ' + str(_getNSTime(time))
+
+    # Measurement
+    # print('network'+result)
+    return result
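+
+# Example (illustrative values): generate_network_report(1500, 3000, 1234) returns
+#   'net_port_io,port_id=enps03 RX_BYTES_PORT_M=1500,TX_BYTES_PORT_M=3000 1234000000'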
+
+
+# Formats VM config
+def generate_vm_config(state, cpu, mem, storage, time):
+    # metric
+    result = 'vm_res_alloc'
+    # Tags
+    result += ',vm_state=' + quote_wrap(state)
+    result += ' '
+    # Fields
+    result += 'cpu=' + str(cpu)
+    result += ',memory=' + quote_wrap(mem)
+    result += ',storage=' + quote_wrap(storage)
+
+    # Time
+    result += ' ' + str(_getNSTime(time))
+
+    print(result)
+    return result
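+
+# Example (illustrative values): generate_vm_config('running', 4, '8GB', '100GB', 1234) returns
+#   'vm_res_alloc,vm_state="running" cpu=4,memory="8GB",storage="100GB" 1234000000'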
+
+
+# Reports CPU usage for a service function instance
+def generate_cpu_report(loc, sfc, sfc_i, sf_package, sf_i, cpu_usage, cpu_usage_system, time):
+    result = 'vm_host_cpu_usage'
+    # Tag
+    result += ',location='+quote_wrap(loc)
+    result += ',sfc='+quote_wrap(sfc)
+    result += ',sfc_i='+quote_wrap(sfc_i)
+    result += ',sf_package='+quote_wrap(sf_package)
+    result += ',sf_i='+quote_wrap(sf_i)
+    result += ' '
+    # field
+    result += 'cpu_usage='+str(cpu_usage)
+    result += ',cpu_usage_system='+str(cpu_usage_system)
+    result += ' '
+    # Time
+    result += str(_getNSTime(time))
+    print(result)
+    return result
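+
+# Example (illustrative values): generate_cpu_report('DC1', 'sfc1', 'sfc1_1', 'mpegdash', 'mpegdash_1', 0.3, 0.1, 1234) returns
+#   'vm_host_cpu_usage,location="DC1",sfc="sfc1",sfc_i="sfc1_1",sf_package="mpegdash",sf_i="mpegdash_1" cpu_usage=0.3,cpu_usage_system=0.1 1234000000'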
+
+
+# Reports request counts and response times for the MPEG-DASH service
+def generate_mpegdash_report(resource, requests, avg_response_time, peak_response_time, time):
+    # Measurement
+    result = 'mpegdash_service '
+    # Tags
+    # None
+    # Fields
+    result += 'cont_nav=\"' + str(resource) + "\","
+    # result += 'cont_rep=' + str(quality) + ','
+    result += 'requests=' + str(requests) + ','
+    result += 'avg_response_time=' + str(avg_response_time) + ','
+    result += 'peak_response_time=' + str(peak_response_time)
+    # Timestamp
+    result += ' ' + str(_getNSTime(time))
+    # print(result)
+    return result
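+
+# Example (illustrative values): generate_mpegdash_report('video_A.mpd', 20, 80, 120, 1234) returns
+#   'mpegdash_service cont_nav="video_A.mpd",requests=20,avg_response_time=80,peak_response_time=120 1234000000'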
+
+
+# Influx needs strings to be quoted; this is a small utility to do so
+def quote_wrap(text):
+    return "\"" + text + "\""
+
+
+# InfluxDB expects timestamps in nanoseconds
+def _getNSTime(time):
+    # Scale the (assumed millisecond) timestamp up to nanoseconds
+    return 1000000 * time
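+
+# Example: _getNSTime(1234) returns 1234000000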
+
+# DEPRECATED
+# ____________________________________________________________________________
+
+# DEPRECATED: old structure, not part of new spec
 def _generateClientRequest(cReq, id, time):
     # Tags first
     result = 'sid="' + str(id) + '",' + cReq
@@ -19,6 +108,7 @@ def _generateClientRequest(cReq, id, time):
 
 
 # Method to create a full InfluxDB response statement
+# DEPRECATED: old structure, not part of new spec
 def _generateServerResponse(reqID, quality, time, cpuUsage, qualityDifference):
     # Tags first
     result = ' '
@@ -38,42 +128,8 @@ def _generateServerResponse(reqID, quality, time, cpuUsage, qualityDifference):
     return 'response' + result
 
 
-def _generateNetworkReport(sum_of_client_quality, time):
-    # Measurement
-    result = 'net_port_io'
-    # Tags
-    result += ',port_id=enps03 '
-    # Fields
-    result += 'RX_BYTES_PORT_M=' + str(sum_of_client_quality * 32) + ","
-    result += 'TX_BYTES_PORT_M=' + str(sum_of_client_quality * 1024)
-    # Timestamp
-    result += ' ' + str(_getNSTime(time))
-
-    # Measurement
-    # print('network'+result)
-    return result
-
-
-def _generateMpegDashReport(resource, quality, time):
-    # Measurement
-    result = 'mpegdash_service '
-    # Tags
-    #None
-    # Fields
-    requests = randint(10, 30)
-    avg_response_time = 50 + randint(0, 100) + randint(0, 10 * quality)
-    peak_response_time = avg_response_time + randint(30, 60) + randint(5, 10) * quality
-    result += 'cont_nav=\"' + str(resource) + "\","
-    result += 'cont_rep=' + str(quality) + ','
-    result += 'requests=' + str(requests) + ','
-    result += 'avg_response_time=' + str(avg_response_time) + ','
-    result += 'peak_response_time=' + str(peak_response_time)
-    # Timestamp
-    result += ' ' + str(_getNSTime(time))
-    #print(result)
-    return result
-
 
+# Formats server config
 def _generateServerConfig(ID, location, cpu, mem, storage, time):
     # metric
     result = 'host_resource'
@@ -93,24 +149,8 @@ def _generateServerConfig(ID, location, cpu, mem, storage, time):
     return result
 
 
-def _generateVMConfig(state, cpu, mem, storage, time):
-    # metric
-    result = 'vm_res_alloc'
-    # Tags
-    result += ',vm_state=' + quote_wrap(state)
-    result += ' '
-    # Fields
-    result += 'cpu=' + str(cpu)
-    result += ',memory=' + quote_wrap(mem)
-    result += ',storage=' + quote_wrap(storage)
-
-    # Time
-    result += ' ' + str(_getNSTime(time))
-
-    print(result)
-    return result
-
 
+# Formats port config
 def _configure_port(port_id, state, rate, time):
     # metric
     result = 'net_port_config '
@@ -126,6 +166,8 @@ def _configure_port(port_id, state, rate, time):
     print(result)
     return result
 
+
+# Formats service function config
 def _configure_service_function(state, max_connected_clients):
     # measurement
     result = 'mpegdash_service_config'
@@ -137,33 +179,15 @@ def _configure_service_function(state, max_connected_clients):
 
     return result
 
-# Simulating telegraf reporting
-def generate_CPU_report(requests, time):
-    # Measurement
-    result = 'cpu'
-    # meta tag
-    # We are simulating the summed CPUs, individual CPUs would have cpu=cpuNumber instead
-    result += ',cpu="cpu-total"'
-    result += ' '
-    # field
-    system = randint(0, min(100, requests*5))
-    steal = randint(0, 100-system)
-    idle = 100-(system+steal)
-    result += 'usage_steal='+str(steal/100)
-    result += ',usage_system='+str(system/100)
-    result += ',usage_idle='+str(idle/100)
-    result += ' '
-    # Time
-    result += str(_getNSTime(time))
-    print(result)
-    return result
 
-def generate_mem_report(total_mem, time):
+
+# Reports memory usage, scaling on requests
+def generate_mem_report(requests, total_mem, time):
     # Measurement
     result = 'mem'
     result += ' '
     # field
-    used = randint(30, 80)
+    used = randint(0, min(100, 5 * requests))
     available = 100-used
     result += 'available_percent='+str(available)
     result += ',used_percent='+str(used)
@@ -174,6 +198,8 @@ def generate_mem_report(total_mem, time):
     print(result)
     return result
 
+
+# Formats compute node config
 def generate_compute_node_config(slice_id, location, node_id, cpus, mem, storage, time):
     # Measurement
     result = 'compute_node_config'
@@ -193,6 +219,8 @@ def generate_compute_node_config(slice_id, location, node_id, cpus, mem, storage
     print(result)
     return result
 
+
+# Formats network resource config
 def generate_network_resource_config(slice_id, network_id, bandwidth, time):
     # Measurement
     result = 'network_resource_config'
@@ -209,6 +237,8 @@ def generate_network_resource_config(slice_id, network_id, bandwidth, time):
     print(result)
     return result
 
+
+# Formats network interface config
 def generate_network_interface_config(slice_id, comp_node_id, port_id, rx_constraint, tx_constraint, time):
     # Measurement
     result = 'network_interface_config'
@@ -228,15 +258,37 @@ def generate_network_interface_config(slice_id, comp_node_id, port_id, rx_constr
     return result
 
 
+# Formats SF instance config
+def generate_sf_instance_surrogate_config(loc, sfc, sfc_i, sf_package, sf_i, cpus, mem, storage, time):
+    # Measurement
+    result = 'sf_instance_surrogate_config'
+    # Meta Tags
+    result += ',location='+quote_wrap(loc)
+    result += ',sfc='+quote_wrap(sfc)
+    result += ',sfc_i='+quote_wrap(sfc_i)
+    result += ',sf_package='+quote_wrap(sf_package)
+    result += ',sf_i='+quote_wrap(sf_i)
+    result += ' '
+    # field
+    result += 'cpus='+str(cpus)
+    result += ',memory='+str(mem)
+    result += ',storage='+str(storage)
+    result += ' '
+    # Time
+    result += str(_getNSTime(time))
+    print(result)
+    return result
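+
+# Example (illustrative values): generate_sf_instance_surrogate_config('DC1', 'sfc1', 'sfc1_1', 'mpegdash', 'mpegdash_1', 2, 4096, 100, 1234) returns
+#   'sf_instance_surrogate_config,location="DC1",sfc="sfc1",sfc_i="sfc1_1",sf_package="mpegdash",sf_i="mpegdash_1" cpus=2,memory=4096,storage=100 1234000000'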
 
 
+# Formats the common service function context tags shared by other line protocol generators
+def service_function_measurement(measurement, service_function_context):
+    result = measurement
+    result += ',sfc='+quote_wrap(service_function_context.sfc)
+    result += ',sfc_i='+quote_wrap(service_function_context.sfc_i)
+    result += ',sf_package='+quote_wrap(service_function_context.sf_package)
+    result += ',sf_i='+quote_wrap(service_function_context.sf_i)
+
+    return result
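+
+# Example: given a context object ctx with sfc='sfc1', sfc_i='sfc1_1', sf_package='mpegdash', sf_i='mpegdash_1',
+# service_function_measurement('mpegdash_service', ctx) returns
+#   'mpegdash_service,sfc="sfc1",sfc_i="sfc1_1",sf_package="mpegdash",sf_i="mpegdash_1"'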
 
-# Influx needs strings to be quoted, this provides a utility interface to do this
-def quote_wrap(str):
-    return "\"" + str + "\""
 
 
-# InfluxDB likes to have time-stamps in nanoseconds
-def _getNSTime(time):
-    # Convert to nano-seconds
-    return 1000000 * time
diff --git a/src/mediaServiceSim/__pycache__/LineProtocolGenerator.cpython-36.pyc b/src/mediaServiceSim/__pycache__/LineProtocolGenerator.cpython-36.pyc
index e393358e419e0611903d1c5e4bcb264fabfcb884..b8bb4785ef85f853c379898f7b6b17715734a63b 100644
Binary files a/src/mediaServiceSim/__pycache__/LineProtocolGenerator.cpython-36.pyc and b/src/mediaServiceSim/__pycache__/LineProtocolGenerator.cpython-36.pyc differ
diff --git a/src/mediaServiceSim/serviceSim.py b/src/mediaServiceSim/serviceSim.py
index 8ad5baa4432fdc6846a972039e8bf5d70dc5498e..2cdc993af6c0d0b543abbc7b5cd396a3a786fd8e 100644
--- a/src/mediaServiceSim/serviceSim.py
+++ b/src/mediaServiceSim/serviceSim.py
@@ -102,8 +102,9 @@ class DemoClient(object):
         # Return the _partial_ InfluxDB statement (server will complete the rest)
         return result
 
+
 # Used to tell influx to launch or teardown a database (DB name overwritten by telegraf)
-class DatabaseManager():
+class DatabaseManager:
     def __init__(self, influx_url, db_name):
         self.influx_url = influx_url
         self.influx_db = db_name
@@ -126,8 +127,9 @@ class DatabaseManager():
         req = urllib.request.Request(self.influx_url + '/query ', query)
         urllib.request.urlopen(req)
 
+
 # Used to allocate clients to servers
-class ClientManager():
+class ClientManager:
     def __init__(self, servers):
         self.servers = servers
     def generate_new_clients(self, amount):
@@ -138,8 +140,9 @@ class ClientManager():
                     server.assign_client(DemoClient())
                     assigned_count += 1
 
+
 # Simulates nodes not connected directly to clients (e.g. telegraf)
-class Node():
+class Node:
     def __init__(self, influxurl, influxdb, input_cpu):
         self.influx_url = influxurl
         self.influx_db = influxdb
@@ -159,6 +162,8 @@ class Node():
         req = urllib.request.Request(self.influx_url + '/write?db=' + self.influx_db, data, header)
         urllib.request.urlopen(req)
 
+# Container for common SF tags, used as part of generating SF usage reports
+
 
 # DemoServer is the class that simulates the behaviour of the MPEG-DASH server
 class DemoServer(object):
@@ -203,7 +208,7 @@ class DemoServer(object):
             server_conf_block.append(lp._configure_port())
         self._sendInfluxDataBlock(server_conf_block)
 
-    def shutdown_VMs(self):
+    def shutdown_VM(self):
         print("Shutting down VM nodes")
         VM_conf_block = []
         self._generateVMS('stopping', 10, VM_conf_block)