diff --git a/Vagrantfile b/Vagrantfile
index 4c66d3b65381d91d2bc34f62453b6238997ee9b8..24448f2ffc3c9a791fc4407c2ae3555aba9528df 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -26,35 +26,55 @@
 Vagrant.configure("2") do |config|
   config.vm.box = "ubuntu/xenial64"
  
-  config.vm.define "influx" do |my|
+  config.vm.define "clmc" do |my|
+
+      config.vm.network :private_network, ip: "192.168.50.10", virtualbox__intnet: "clmc-net"
+
       my.vm.provider "virtualbox" do |v|
         v.customize ["modifyvm", :id, "--memory", 2048]
         v.customize ["modifyvm", :id, "--cpus", 1]
       end
 
-      # copy resource files into VM
-      config.vm.provision "file", source: "./scripts/influx/telegraf.conf", destination: "$HOME/config/telegraf/telegraf.conf"
-
       # open InfluxDB port
       config.vm.network "forwarded_port", guest: 8086, host: 8086 
 
       # open Chronograf port
       config.vm.network "forwarded_port", guest: 8888, host: 8888
 
-      # open TICK Kapacitor port
+      # open Kapacitor port
       config.vm.network "forwarded_port", guest: 9092, host: 9092
 
-      # open local Telegraf port
-      config.vm.network "forwarded_port", guest: 8186, host: 8186
+      # install the CLMC service
+      my.vm.provision :shell, :path => 'scripts/influx/install-clmc-service.sh'
 
-      # install the TICK stack
-      config.vm.provision :shell, :path => 'scripts/influx/install-tick-stack-vm.sh'
+      # start the CLMC service
+      my.vm.provision :shell, :path => 'scripts/influx/start-clmc-service.sh'
+  end
 
-      # configure the TICK stack
-      config.vm.provision :shell, :path => 'scripts/influx/configure-tick-stack-vm.sh'
+  config.vm.define "ipendpoint1" do |my|
 
-      # start the TICK stack
-      config.vm.provision :shell, :path => 'scripts/influx/start-tick-stack-services.sh'
+      config.vm.network :private_network, ip: "192.168.50.11", virtualbox__intnet: "clmc-net"
 
-  end  
+      my.vm.provider "virtualbox" do |v|
+        v.customize ["modifyvm", :id, "--memory", 512]
+        v.customize ["modifyvm", :id, "--cpus", 1]
+      end
+
+      # Install CLMC agent
+      my.vm.provision :shell, :path => 'scripts/influx/install-clmc-agent.sh', :args => "/vagrant/scripts/influx/telegraf_ipendpoint1.conf"
+  end
+
+  config.vm.define "ipendpoint2" do |my|
+
+      config.vm.network :private_network, ip: "192.168.50.12", virtualbox__intnet: "clmc-net"
+
+      my.vm.provider "virtualbox" do |v|
+        v.customize ["modifyvm", :id, "--memory", 512]
+        v.customize ["modifyvm", :id, "--cpus", 1]
+      end
+
+      # Install CLMC agent
+      my.vm.provision :shell, :path => 'scripts/influx/install-clmc-agent.sh', :args => "/vagrant/scripts/influx/telegraf_ipendpoint2.conf"
+  end
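+
+  # Typical workflow (illustrative): "vagrant up" brings up all three VMs; the
+  # simulators under src/mediaServiceSim then write metrics to the Telegraf agents
+  # on the ipendpoint VMs, which forward them to InfluxDB on the clmc VM
+  # (192.168.50.10:8086).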
+  
 end
diff --git a/scripts/influx/configure-tick-stack-vm.sh b/scripts/influx/install-clmc-agent.sh
similarity index 73%
rename from scripts/influx/configure-tick-stack-vm.sh
rename to scripts/influx/install-clmc-agent.sh
index 433e036f7fb31c073a254fc82d4e87be917de2a4..4a49b802a1271c1d2cd3b603a653ee1345f3fea2 100644
--- a/scripts/influx/configure-tick-stack-vm.sh
+++ b/scripts/influx/install-clmc-agent.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 #/////////////////////////////////////////////////////////////////////////
 #//
-#// (c) University of Southampton IT Innovation Centre, 2018
+#// (c) University of Southampton IT Innovation Centre, 2017
 #//
 #// Copyright in this software belongs to University of Southampton
 #// IT Innovation Centre of Gamma House, Enterprise Road,
@@ -18,17 +18,18 @@
 #// PURPOSE, except where stated in the Licence Agreement supplied with
 #// the software.
 #//
-#//      Created By :            Simon Crowle
-#//      Created Date :          03/11/2018
+#//      Created By :            Michael Boniface
+#//      Created Date :          13/12/2017
 #//      Created for Project :   FLAME
 #//
 #/////////////////////////////////////////////////////////////////////////
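+
+# Usage (as invoked by the Vagrantfile provisioners):
+#   install-clmc-agent.sh /vagrant/scripts/influx/telegraf_ipendpoint1.conf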
 
-echo Configuring TICK stack services...
+# Install telegraf
+wget https://dl.influxdata.com/telegraf/releases/telegraf_1.3.2-1_amd64.deb
+dpkg -i telegraf_1.3.2-1_amd64.deb
 
-# Configure Telegraf
-systemctl stop telegraf
-
-cp ./config/telegraf/telegraf.conf /etc/telegraf/
+# Copy the Telegraf configuration given as the first argument
+cp "$1" /etc/telegraf/telegraf.conf
 
+# Start telegraf
 systemctl start telegraf
\ No newline at end of file
diff --git a/scripts/influx/install-tick-stack-vm.sh b/scripts/influx/install-clmc-service.sh
similarity index 92%
rename from scripts/influx/install-tick-stack-vm.sh
rename to scripts/influx/install-clmc-service.sh
index 0a490315a4fda915a3746afe390b246e13fd2fb6..683eba3727bd189363fe5e0835e09d5e5277c4e1 100644
--- a/scripts/influx/install-tick-stack-vm.sh
+++ b/scripts/influx/install-clmc-service.sh
@@ -24,6 +24,10 @@
 #//
 #/////////////////////////////////////////////////////////////////////////
 
+# install python for the simulator scripts (src/mediaServiceSim)
+sudo apt-get update
+sudo apt-get install -y python
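+
+# (Illustrative check) once the services below are installed and started, InfluxDB
+# should answer on the forwarded port 8086, e.g.:
+#   curl -sG "http://localhost:8086/query" --data-urlencode "q=SHOW DATABASES"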
+
 # install influx
 wget https://dl.influxdata.com/influxdb/releases/influxdb_1.2.4_amd64.deb
 dpkg -i influxdb_1.2.4_amd64.deb
@@ -32,10 +36,6 @@ dpkg -i influxdb_1.2.4_amd64.deb
 wget https://dl.influxdata.com/kapacitor/releases/kapacitor_1.3.1_amd64.deb
 dpkg -i kapacitor_1.3.1_amd64.deb
 
-# install Telegraf
-wget https://dl.influxdata.com/telegraf/releases/telegraf_1.3.2-1_amd64.deb
-dpkg -i telegraf_1.3.2-1_amd64.deb
-
 # install Chronograf
 wget https://dl.influxdata.com/chronograf/releases/chronograf_1.3.3.0_amd64.deb
 dpkg -i chronograf_1.3.3.0_amd64.deb
\ No newline at end of file
diff --git a/scripts/influx/install-tick-stack-docker-compose.sh b/scripts/influx/install-tick-stack-docker-compose.sh
deleted file mode 100644
index 7acc2906313f3612e82247554878a92af4b71410..0000000000000000000000000000000000000000
--- a/scripts/influx/install-tick-stack-docker-compose.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-#/////////////////////////////////////////////////////////////////////////
-#//
-#// (c) University of Southampton IT Innovation Centre, 2017
-#//
-#// Copyright in this software belongs to University of Southampton
-#// IT Innovation Centre of Gamma House, Enterprise Road,
-#// Chilworth Science Park, Southampton, SO16 7NS, UK.
-#//
-#// This software may not be used, sold, licensed, transferred, copied
-#// or reproduced in whole or in part in any manner or form or in or
-#// on any media by any person other than in accordance with the terms
-#// of the Licence Agreement supplied with the software, or otherwise
-#// without the prior written consent of the copyright owners.
-#//
-#// This software is distributed WITHOUT ANY WARRANTY, without even the
-#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-#// PURPOSE, except where stated in the Licence Agreement supplied with
-#// the software.
-#//
-#//      Created By :            Michael Boniface
-#//      Created Date :          13/12/2017
-#//      Created for Project :   FLAME
-#//
-#/////////////////////////////////////////////////////////////////////////
-
-# install docker
-apt-get -y update
-apt-get -y install apt-transport-https ca-certificates curl software-properties-common
-curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
-add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
-apt-get -y update
-apt-get install docker-ce
-
-# to get a specific version look at the cache and run the install with that version
-# apt-cache madison docker-ce
-# apt-get install docker-ce=<VERSION>
-
-# test docker
-# docker run hello-world
-
-# install docker compose
-curl -L https://github.com/docker/compose/releases/download/1.17.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
-chmod +x /usr/local/bin/docker-compose
-# test compose
-docker-compose version 1.17.0, build 1719ceb
-
-# install tick stack
-git clone https://github.com/influxdata/TICK-docker.git /opt
-cd /opt/TICK-docker/1.3
-docker-compose up -d
diff --git a/scripts/influx/start-tick-stack-services.sh b/scripts/influx/start-clmc-service.sh
similarity index 91%
rename from scripts/influx/start-tick-stack-services.sh
rename to scripts/influx/start-clmc-service.sh
index ce453d43e82d320e0b35f436eed217283cb066fd..f92c6b5eaf0c93b5a98585b4aab4182d09e2360e 100644
--- a/scripts/influx/start-tick-stack-services.sh
+++ b/scripts/influx/start-clmc-service.sh
@@ -28,8 +28,4 @@ echo Starting TICK stack services...
 
 systemctl start influxdb
 systemctl start kapacitor
-systemctl start telegraf
-systemctl start chronograf
-
-# test influx
-#curl "http://localhost:8086/query?q=show+databases"
\ No newline at end of file
+systemctl start chronograf
\ No newline at end of file
diff --git a/scripts/influx/telegraf.conf b/scripts/influx/telegraf_ipendpoint1.conf
similarity index 94%
rename from scripts/influx/telegraf.conf
rename to scripts/influx/telegraf_ipendpoint1.conf
index d727cf5fa832d203c085f40ba3ac7756b9dc0f3d..7844869d3dff11c749c209facb63b284e4556dbc 100644
--- a/scripts/influx/telegraf.conf
+++ b/scripts/influx/telegraf_ipendpoint1.conf
@@ -11,9 +11,12 @@
 
 # Global tags can be specified here in key="value" format.
 [global_tags]
-  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
-  # rack = "1a"
-  auth = "IT-Inn"
+  location="DC1"
+  sfc="MS_Template_1"
+  sfc_i="MS_I1"
+  sf="adaptive_streaming"
+  sf_i="adaptive_streaming_I1"
+  ipendpoint="adaptive_streaming_I1_ipendpoint1"
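+  # Common CLMC context tags (assumed meanings): sfc/sfc_i = service function chain
+  # template and instance, sf/sf_i = service function and its instance, ipendpoint =
+  # the endpoint this agent reports for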
 
 # Configuration for telegraf agent
 [agent]
@@ -65,7 +68,7 @@
   # Multiple urls can be specified but it is assumed that they are part of the same
   # cluster, this means that only ONE of the urls will be written to each interval.
   # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
-  urls = ["http://localhost:8086"] # required
+  urls = ["http://192.168.50.10:8086"] # required
   # The target database for metrics (telegraf will create it if not exists)
   database = "CLMCMetrics" # required
   # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
@@ -95,7 +98,6 @@
 ###############################################################################
 #                                  INPUTS                                     #
 ###############################################################################
-
 # # Influx HTTP write listener
 [[inputs.http_listener]]
   ## Address and port to host HTTP listener on
@@ -110,4 +112,5 @@
   #tls_key = "/etc/telegraf/key.pem"
 
   ## MTLS
-  #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
\ No newline at end of file
+  #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+  
\ No newline at end of file
diff --git a/scripts/influx/telegraf_ipendpoint2.conf b/scripts/influx/telegraf_ipendpoint2.conf
new file mode 100644
index 0000000000000000000000000000000000000000..e0d62af95e5fa42e7462441b263dc295c428b26f
--- /dev/null
+++ b/scripts/influx/telegraf_ipendpoint2.conf
@@ -0,0 +1,116 @@
+# Telegraf configuration
+
+# Telegraf is entirely plugin driven. All metrics are gathered from the
+# declared inputs, and sent to the declared outputs.
+
+# Plugins must be declared in here to be active.
+# To deactivate a plugin, comment out the name and any variables.
+
+# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
+# file would generate.
+
+# Global tags can be specified here in key="value" format.
+[global_tags]
+  location="DC2"
+  sfc="MS_Template_1"
+  sfc_i="MS_I1"
+  sf="adaptive_streaming"
+  sf_i="adaptive_streaming_I1"
+  ipendpoint="adaptive_streaming_I1_ipendpoint2"
+
+# Configuration for telegraf agent
+[agent]
+  ## Default data collection interval for all inputs
+  interval = "10s"
+  ## Rounds collection interval to 'interval'
+  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
+  round_interval = true
+
+  ## Telegraf will cache metric_buffer_limit metrics for each output, and will
+  ## flush this buffer on a successful write.
+  metric_buffer_limit = 1000
+  ## Flush the buffer whenever full, regardless of flush_interval.
+  flush_buffer_when_full = true
+
+  ## Collection jitter is used to jitter the collection by a random amount.
+  ## Each plugin will sleep for a random time within jitter before collecting.
+  ## This can be used to avoid many plugins querying things like sysfs at the
+  ## same time, which can have a measurable effect on the system.
+  collection_jitter = "0s"
+
+  ## Default flushing interval for all outputs. You shouldn't set this below
+  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
+  flush_interval = "10s"
+  ## Jitter the flush interval by a random amount. This is primarily to avoid
+  ## large write spikes for users running a large number of telegraf instances.
+  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
+  flush_jitter = "0s"
+
+  ## Logging configuration:
+  ## Run telegraf in debug mode
+  debug = false
+  ## Run telegraf in quiet mode
+  quiet = false
+  ## Specify the log file name. The empty string means to log to stdout.
+  logfile = "G:/Telegraf/telegraf.log"
+
+  ## Override default hostname, if empty use os.Hostname()
+  hostname = ""
+
+
+###############################################################################
+#                                  OUTPUTS                                    #
+###############################################################################
+
+# Configuration for influxdb server to send metrics to
+[[outputs.influxdb]]
+  # The full HTTP or UDP endpoint URL for your InfluxDB instance.
+  # Multiple urls can be specified but it is assumed that they are part of the same
+  # cluster, this means that only ONE of the urls will be written to each interval.
+  # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
+  urls = ["http://192.168.50.10:8086"] # required
+  # The target database for metrics (telegraf will create it if not exists)
+  database = "CLMCMetrics" # required
+  # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+  # note: using second precision greatly helps InfluxDB compression
+  precision = "s"
+
+  ## Write timeout (for the InfluxDB client), formatted as a string.
+  ## If not provided, will default to 5s. 0s means no timeout (not recommended).
+  timeout = "5s"
+  # username = "telegraf"
+  # password = "metricsmetricsmetricsmetrics"
+  # Set the user agent for HTTP POSTs (can be useful for log differentiation)
+  # user_agent = "telegraf"
+  # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
+  # udp_payload = 512
+[[outputs.file]]
+  ## Files to write to, "stdout" is a specially handled file.
+  files = ["stdout", "/tmp/metrics.out"]
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "influx"
+
+
+###############################################################################
+#                                  INPUTS                                     #
+###############################################################################
+# # Influx HTTP write listener
+[[inputs.http_listener]]
+  ## Address and port to host HTTP listener on
+  service_address = ":8186"
+
+  ## timeouts
+  read_timeout = "10s"
+  write_timeout = "10s"
+
+  ## HTTPS
+  #tls_cert= "/etc/telegraf/cert.pem"
+  #tls_key = "/etc/telegraf/key.pem"
+
+  ## MTLS
+  #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+  
\ No newline at end of file
diff --git a/src/mediaServiceSim/LineProtocolGenerator.py b/src/mediaServiceSim/LineProtocolGenerator.py
index 25d19df73c2b977903f181b357adb9b790918989..f57689a2a34d90871caf0a864b21c93a860bd2df 100644
--- a/src/mediaServiceSim/LineProtocolGenerator.py
+++ b/src/mediaServiceSim/LineProtocolGenerator.py
@@ -4,6 +4,90 @@
 import uuid
 from random import random, randint
 
+
+# Reports TX and RX, scaling on requested quality
+def generate_network_report(received_bytes, sent_bytes, time):
+    # Measurement
+    result = 'net_port_io'
+    # Tags
+    result += ',port_id=enps03 '
+    # Fields
+    result += 'RX_BYTES_PORT_M=' + str(received_bytes) + ","
+    result += 'TX_BYTES_PORT_M=' + str(sent_bytes)
+    # Timestamp
+    result += ' ' + str(_getNSTime(time))
+
+    # Measurement
+    # print('network'+result)
+    return result
+
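+# Example (illustrative): generate_network_report(1024, 2048, 1513073750000) returns
+#   net_port_io,port_id=enps03 RX_BYTES_PORT_M=1024,TX_BYTES_PORT_M=2048 1513073750000000000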
+
+# Formats VM config
+def generate_vm_config(state, cpu, mem, storage, time):
+    # metric
+    result = 'vm_res_alloc'
+    # Tags
+    result += ',vm_state=' + quote_wrap(state)
+    result += ' '
+    # Fields
+    result += 'cpu=' + str(cpu)
+    result += ',memory=' + quote_wrap(mem)
+    result += ',storage=' + quote_wrap(storage)
+
+    # Time
+    result += ' ' + str(_getNSTime(time))
+
+    print(result)
+    return result
+
+
+# Reports cpu usage, scaling on requests
+def generate_cpu_report(cpu_usage, cpu_usage_system, time):
+    result = 'vm_host_cpu_usage'
+    # Tag
+    result += ' '
+    # field
+    result += 'cpu_usage='+str(cpu_usage)
+    result += ',cpu_usage_system='+str(cpu_usage_system)
+    result += ' '
+    # Time
+    result += str(_getNSTime(time))
+    print(result)
+    return result
+
+
+# Reports response times, scaling on number of requests
+def generate_mpegdash_report(resource, requests, avg_response_time, peak_response_time, time):
+    # Measurement
+    result = 'mpegdash_service'
+    # Tags
+    result += ',cont_nav=\"' + str(resource) + "\" "
+    # Fields
+
+    # result += 'cont_rep=' + str(quality) + ','
+    result += 'requests=' + str(requests) + ','
+    result += 'avg_response_time=' + str(avg_response_time) + ','
+    result += 'peak_response_time=' + str(peak_response_time)
+    # Timestamp
+    result += ' ' + str(_getNSTime(time))
+    # print(result)
+    return result
+
+
+# Influx needs strings to be quoted, this provides a utility interface to do this
+def quote_wrap(string):
+    return "\"" + string + "\""
+
+
+# InfluxDB expects timestamps in nanoseconds
+def _getNSTime(time):
+    # The simulators track time in milliseconds, so convert ms -> ns
+    return 1000000 * time
+
+# DEPRECATED
+# ____________________________________________________________________________
+
+# DEPRECATED: old structure, not part of new spec
 def _generateClientRequest(cReq, id, time):
     # Tags first
     result = 'sid="' + str(id) + '",' + cReq
@@ -19,6 +103,7 @@ def _generateClientRequest(cReq, id, time):
 
 
 # Method to create a full InfluxDB response statement
+# DEPRECATED: old structure, not part of new spec
 def _generateServerResponse(reqID, quality, time, cpuUsage, qualityDifference):
     # Tags first
     result = ' '
@@ -38,47 +123,13 @@ def _generateServerResponse(reqID, quality, time, cpuUsage, qualityDifference):
     return 'response' + result
 
 
-def _generateNetworkReport(sum_of_client_quality, time):
-    # Measurement
-    result = 'net_port_io'
-    # Tags
-    result += ',port_id=enps03 '
-    # Fields
-    result += 'RX_BYTES_PORT_M=' + str(sum_of_client_quality * 32) + ","
-    result += 'TX_BYTES_PORT_M=' + str(sum_of_client_quality * 1024)
-    # Timestamp
-    result += ' ' + str(_getNSTime(time))
-
-    # Measurement
-    # print('network'+result)
-    return result
-
-
-def _generateMpegDashReport(resource, quality, time):
-    # Measurement
-    result = 'mpegdash_service '
-    # Tags
-    #None
-    # Fields
-    requests = randint(10, 30)
-    avg_response_time = 50 + randint(0, 100) + randint(0, 10 * quality)
-    peak_response_time = avg_response_time + randint(30, 60) + randint(5, 10) * quality
-    result += 'cont_nav=\"' + str(resource) + "\","
-    result += 'cont_rep=' + str(quality) + ','
-    result += 'requests=' + str(requests) + ','
-    result += 'avg_response_time=' + str(avg_response_time) + ','
-    result += 'peak_response_time=' + str(peak_response_time)
-    # Timestamp
-    result += ' ' + str(_getNSTime(time))
-    #print(result)
-    return result
-
 
+# Formats server config
 def _generateServerConfig(ID, location, cpu, mem, storage, time):
     # metric
     result = 'host_resource'
     # Tags
-    result += ',slide_id=' + quote_wrap(ID)
+    result += ',slice_id=' + quote_wrap(ID)
     result += ',location=' + quote_wrap(location)
     result += ' '
     # Fields
@@ -93,31 +144,15 @@ def _generateServerConfig(ID, location, cpu, mem, storage, time):
     return result
 
 
-def _generateVMConfig(state, cpu, mem, storage, time):
-    # metric
-    result = 'vm_res_alloc'
-    # Tags
-    result += ',vm_state=' + quote_wrap(state)
-    result += ' '
-    # Fields
-    result += 'cpu=' + str(cpu)
-    result += ',memory=' + quote_wrap(mem)
-    result += ',storage=' + quote_wrap(storage)
-
-    # Time
-    result += ' ' + str(_getNSTime(time))
-
-    print(result)
-    return result
-
 
+# Format port config
 def _configure_port(port_id, state, rate, time):
     # metric
     result = 'net_port_config '
     # Fields
     result += 'port_id=' + quote_wrap('enps' + port_id)
-    result += 'port_state=' + quote_wrap(state)
-    result += 'tx_constraint=' + quote_wrap(rate)
+    result += ',port_state=' + quote_wrap(state)
+    result += ',tx_constraint=' + quote_wrap(rate)
     result += ' '
 
     # Time
@@ -126,22 +161,129 @@ def _configure_port(port_id, state, rate, time):
     print(result)
     return result
 
+
+# Format service function config
 def _configure_service_function(state, max_connected_clients):
     # measurement
     result = 'mpegdash_service_config'
     # tags
-    result += ',running='+quote_wrap(state)
+    result += ',service_state='+quote_wrap(state)
     result += ' '
     # fields
-    result += 'max_connected_clients='+max_connected_clients
+    result += 'max_connected_clients='+str(max_connected_clients)
+
+    return result
+
+
+
+# Reports memory usage, scaling on requests
+def generate_mem_report(requests, total_mem, time):
+    # Measurement
+    result = 'mem'
+    result += ' '
+    # field
+    used = randint(0, min(100,5*requests))
+    available = 100-used
+    result += 'available_percent='+str(available)
+    result += ',used_percent='+str(used)
+    result += ',total='+str(total_mem)
+    result += ' '
+    # Time
+    result += str(_getNSTime(time))
+    print(result)
+    return result
+
+
+# Formats compute node config
+def generate_compute_node_config(slice_id, location, node_id, cpus, mem, storage, time):
+    # Measurement
+    result = 'compute_node_config'
+    # CommonContext Tag
+    result += ',slice_id='+quote_wrap(slice_id)
+    # Tag
+    result += ',location='+quote_wrap(location)
+    result += ',comp_node_id='+quote_wrap(node_id)
+    result += ' '
+    # field
+    result += 'cpus='+str(cpus)
+    result += ',memory='+str(mem)
+    result += ',storage='+str(storage)
+    result += ' '
+    # Time
+    result += str(_getNSTime(time))
+    print(result)
+    return result
+
+
+# Formats network resource config
+def generate_network_resource_config(slice_id, network_id, bandwidth, time):
+    # Measurement
+    result = 'network_resource_config'
+    # Meta Tag
+    result += ',slice_id='+quote_wrap(slice_id)
+    # Tag
+    result += ',network_id='+quote_wrap(network_id)
+    result += ' '
+    # field
+    result += 'bandwidth='+str(bandwidth)
+    result += ' '
+    # Time
+    result += str(_getNSTime(time))
+    print(result)
+    return result
+
+
+# Formats network interface config
+def generate_network_interface_config(slice_id, comp_node_id, port_id, rx_constraint, tx_constraint, time):
+    # Measurement
+    result = 'network_interface_config'
+    # Meta Tag
+    result += ',slice_id='+quote_wrap(slice_id)
+    # Tags
+    result += ',comp_node_id='+quote_wrap(comp_node_id)
+    result += ',port_id='+quote_wrap(port_id)
+    result += ' '
+    # field
+    result += 'rx_constraint='+str(rx_constraint)
+    result += ',tx_constraint='+str(tx_constraint)
+    result += ' '
+    # Time
+    result += str(_getNSTime(time))
+    print(result)
+    return result
+
+
+# Format SF instance config
+def generate_sf_instance_surrogate_config(loc, sfc, sfc_i, sf_package, sf_i, cpus, mem, storage, time):
+    # Measurement
+    result = 'sf_instance_surrogate_config'
+    # Meta Tag
+    result += ',location='+quote_wrap(loc)
+    result += ',sfc='+quote_wrap(sfc)
+    result += ',sfc_i='+quote_wrap(sfc_i)
+    result += ',sf_package='+quote_wrap(sf_package)
+    result += ',sf_i='+quote_wrap(sf_i)
+    result += ' '
+    # field
+    result += 'cpus='+str(cpus)
+    result += ',memory='+str(mem)
+    result += ',storage='+str(storage)
+    result += ' '
+    # Time
+    result += str(_getNSTime(time))
+    print(result)
+    return result
+
+
+# Formats context container as part of other line protocol generators
+def service_function_measurement(measurement, service_function_context):
+    result = measurement
+    result += ',sfc='+quote_wrap(service_function_context.sfc)
+    result += ',sfc_i='+quote_wrap(service_function_context.sfc_i)
+    result += ',sf_package='+quote_wrap(service_function_context.sf_package)
+    result += ',sf_i='+quote_wrap(service_function_context.sf_i)
 
     return result
 
-def quote_wrap(str):
-    return "\"" + str + "\""
 
 
-# InfluxDB likes to have time-stamps in nanoseconds
-def _getNSTime(time):
-    # Convert to nano-seconds
-    return 1000000 * time
diff --git a/src/mediaServiceSim/__pycache__/LineProtocolGenerator.cpython-36.pyc b/src/mediaServiceSim/__pycache__/LineProtocolGenerator.cpython-36.pyc
deleted file mode 100644
index f38f213dc30a01a2424732fbe023db717c3fe646..0000000000000000000000000000000000000000
Binary files a/src/mediaServiceSim/__pycache__/LineProtocolGenerator.cpython-36.pyc and /dev/null differ
diff --git a/src/mediaServiceSim/serviceSim.py b/src/mediaServiceSim/serviceSim.py
index 576773e7f3c47f18e30d308fc7f0b7c10c73423e..2cdc993af6c0d0b543abbc7b5cd396a3a786fd8e 100644
--- a/src/mediaServiceSim/serviceSim.py
+++ b/src/mediaServiceSim/serviceSim.py
@@ -34,59 +34,61 @@ import urllib.parse
 import urllib.request
 import LineProtocolGenerator as lp
 
+
 # DemoConfig is a configuration class used to set up the simulation
 class DemoConfig(object):
-    def __init__( self ):
-        self.LOG_DATA               = False # Log data sent to INFLUX if true
-        self.ITERATION_STRIDE       = 10    # Number of seconds of requests/responses sent to INFLUXDB per HTTP POST
-        self.SEG_LENGTH             = 4     # Each MPEG segment encodes 5 seconds worth of frames (assume double-buffering)
-        self.MAX_SEG                = (30 * 60) / (self.SEG_LENGTH +1) # 30 mins
-        self.MIN_QUALITY            = 5     # Minimum quality requested by a client
-        self.MAX_QUALITY            = 9     # Maximum quality requested by a client
-        self.MIN_SERV_RESP_TIME     = 100   # Mininum time taken for server to respond to a request (ms)
-        self.CLIENT_START_DELAY_MAX = 360   # Randomly delay clients starting stream up to 3 minutes
+    def __init__(self):
+        self.LOG_DATA = False  # Log data sent to INFLUX if true
+        self.ITERATION_STRIDE = 10  # Number of seconds of requests/responses sent to INFLUXDB per HTTP POST
+        self.SEG_LENGTH = 4  # Each MPEG segment encodes 5 seconds worth of frames (assume double-buffering)
+        self.MAX_SEG = (30 * 60) / (self.SEG_LENGTH + 1)  # 30 mins
+        self.MIN_QUALITY = 5  # Minimum quality requested by a client
+        self.MAX_QUALITY = 9  # Maximum quality requested by a client
+        self.MIN_SERV_RESP_TIME = 100  # Mininum time taken for server to respond to a request (ms)
+        self.CLIENT_START_DELAY_MAX = 360  # Randomly delay clients starting stream up to 3 minutes
+
 
 dc = DemoConfig()
 
 
 # DemoClient is a class the simulations the behaviour of a single client requesting video from the server
 class DemoClient(object):
-    def __init__( self ):
-        self.startRequestOffset = randint( 0, dc.CLIENT_START_DELAY_MAX ) # Random time offset before requesting 1st segment
-        self.numSegRequests = dc.MAX_SEG - randint( 0, 50 )               # Randomly stop client watching all of video
-        self.id = uuid.uuid4()                                            # Client's ID
-        self.currSeg = 1                                                  # Client's current segment
-        self.nextSegCountDown = 0                                         # Count-down before asking for next segment
-        self.qualityReq = randint( dc.MIN_QUALITY, dc.MAX_QUALITY )       # Randomly assigned quality for this client
-        self.lastReqID = None                                             # ID used to track last request made by this client
-
-
-    def getQuality( self ):
+    def __init__(self):
+        self.startRequestOffset = randint(0,
+                                          dc.CLIENT_START_DELAY_MAX)  # Random time offset before requesting 1st segment
+        self.numSegRequests = dc.MAX_SEG - randint(0, 50)  # Randomly stop client watching all of video
+        self.id = uuid.uuid4()  # Client's ID
+        self.currSeg = 1  # Client's current segment
+        self.nextSegCountDown = 0  # Count-down before asking for next segment
+        self.qualityReq = randint(dc.MIN_QUALITY, dc.MAX_QUALITY)  # Randomly assigned quality for this client
+        self.lastReqID = None  # ID used to track last request made by this client
+
+    def getQuality(self):
         return self.qualityReq
 
-    def getLastRequestID( self ):
+    def getLastRequestID(self):
         return self.lastReqID
 
-    def iterateRequest( self ):
+    def iterateRequest(self):
         result = None
 
         # If the time offset before asking for 1st segment is through and there are more segments to get
         # and it is time to get one, then create a request for one!
-        if ( self.startRequestOffset == 0 ):
-            if ( self.numSegRequests > 0 ):
-                if ( self.nextSegCountDown == 0 ):
+        if (self.startRequestOffset == 0):
+            if (self.numSegRequests > 0):
+                if (self.nextSegCountDown == 0):
 
                     # Generate a request ID
                     self.lastReqID = uuid.uuid4()
 
                     # Start building the InfluxDB statement
                     # tags first
-                    result = 'cid="' + str( self.id ) + '",'
-                    result += 'segment=' + str( self.currSeg ) + ' '
+                    result = 'cid="' + str(self.id) + '",'
+                    result += 'segment=' + str(self.currSeg) + ' '
 
                     # then fields
-                    result += 'quality=' + str( self.qualityReq ) + ','
-                    result += 'index="' + str( self.lastReqID ) + '"'
+                    result += 'quality=' + str(self.qualityReq) + ','
+                    result += 'index="' + str(self.lastReqID) + '"'
 
                     # Update this client's segment tracking
                     self.currSeg += 1
@@ -101,76 +103,125 @@ class DemoClient(object):
         return result
 
 
+# Used to tell influx to launch or teardown a database (DB name overwritten by telegraf)
+class DatabaseManager:
+    def __init__(self, influx_url, db_name):
+        self.influx_url = influx_url
+        self.influx_db = db_name
 
+    def database_up(self):
+        self._createDB()
+
+    def database_teardown(self):
+        self._deleteDB()
+
+    def _createDB(self):
+        self._sendInfluxQuery('CREATE DATABASE ' + self.influx_db)
+
+    def _deleteDB(self):
+        self._sendInfluxQuery('DROP DATABASE ' + self.influx_db)
+
+    def _sendInfluxQuery(self, query):
+        query = urllib.parse.urlencode({'q': query})
+        query = query.encode('ascii')
+        req = urllib.request.Request(self.influx_url + '/query', query)
+        urllib.request.urlopen(req)
+
+
+# Used to allocate clients to servers
+class ClientManager:
+    def __init__(self, servers):
+        self.servers = servers
+
+    def generate_new_clients(self, amount):
+        assigned_count = 0
+        while(assigned_count < amount):
+            for server in self.servers:
+                if(assigned_count < amount):
+                    server.assign_client(DemoClient())
+                    assigned_count += 1
+
+
+# Simulates nodes not connected directly to clients (e.g. telegraf)
+class Node:
+    def __init__(self, influxurl, influxdb, input_cpu):
+        self.influx_url = influxurl
+        self.influx_db = influxdb
+        self.report_cpu = input_cpu
+
+    def iterateService(self):
+        if self.report_cpu:
+            current_time = int(round(time.time() * 1000))
+            self._sendInfluxData(lp.generate_cpu_report(0, 0, current_time))
+            self._sendInfluxData(lp.generate_mem_report(10, 0, current_time))
+
+    # Private Methods
+    # ________________________________________________________________
+
+    # This is duplicated from DemoServer, should probably be refactored
+    def _sendInfluxData(self, data):
+        data = data.encode()
+        header = {'Content-Type': 'application/octet-stream'}
+        req = urllib.request.Request(self.influx_url + '/write?db=' + self.influx_db, data, header)
+        urllib.request.urlopen(req)
+
+# Container for common SF tags, used as part of generating SF usage reports
 
 
 # DemoServer is the class that simulates the behaviour of the MPEG-DASH server
 class DemoServer(object):
-    def __init__( self, cc, si, dbURL, dbName ):
-        self.influxDB = dbName                                   # InfluxDB database name
-        self.id = uuid.uuid4()                                   # MPEG-DASH server ID
-        self.clientCount = cc                                    # Number of clients to simulate
-        self.simIterations = si                                  # Number of iterations to make for this simulation
-        self.influxURL = dbURL                                   # InfluxDB connection URL
-        self.currentTime = int( round( time.time() * 1000 ) )    # The current time
-        
-
-    def prepareDatabase( self ):
-        self._createDB()
-
+    def __init__(self, si, db_url, db_name, server_id, server_location):
+        self.influxDB = db_name  # InfluxDB database name
+        self.id = uuid.uuid4()  # MPEG-DASH server ID
+        self.simIterations = si  # Number of iterations to make for this simulation
+        self.influxURL = db_url  # InfluxDB connection URL
+        self.currentTime = int(round(time.time() * 1000))  # The current time
+        self._configure(server_id, server_location)
         self.clients = []
-        for i in range( self.clientCount ):
-            self.clients.append( DemoClient() )
 
-    def destroyDatabase( self ):
-        self._deleteDB( self.influxDB )
+    def shutdown(self):
+        print("Shutting down")
+        self.configure_VM('stopping')
 
-    def reportStatus( self ):
-        print ('Number of clients: ' + str(len(self.clients)) )
+    def assign_client(self, new_client):
+        self.clients.append(new_client)
+        print('Number of clients: ' + str(len(self.clients)))
 
-    def configure_servers(self):
+    def configure_server(self, server_id, server_location):
         print("Configuring Servers")
         server_conf_block = []
-        ids = ['A', 'B', 'C']
-        locations = ['locA', 'locB', 'locC']
-        for i, id in enumerate(ids):
-            server_conf_block.append(lp._generateServerConfig(id,locations[i],8,'100G','1T', self._selectDelay(len(ids))))
+        server_conf_block.append(lp._generateServerConfig(server_id, server_location, 8, '100G', '1T',
+                                                          self._selectDelay(0)))
+
+        #ids = ['A', 'B', 'C']
+        #locations = ['locA', 'locB', 'locC']
+        #for i, id in enumerate(ids):
+        #    server_conf_block.append(
+        #        lp._generateServerConfig(id, locations[i], 8, '100G', '1T', self._selectDelay(len(ids))))
         self._sendInfluxDataBlock(server_conf_block)
-    def configure_VMs(self):
-        print("Configuring VM nodes")
-        VM_conf_block = []
-        self._generateVMS('starting',10,VM_conf_block)
-        self._generateVMS('running',10,VM_conf_block)
 
-        self._sendInfluxDataBlock(VM_conf_block)
+    def configure_VM(self, state):
+        print("Configuring VM node")
+        self._sendInfluxData(self._generateVM(state, 1))
 
     def configure_ports(self):
         print("Configuring Servers")
         server_conf_block = []
-        for i in range(0,10):
+        for i in range(0, 10):
             server_conf_block.append(lp._configure_port())
         self._sendInfluxDataBlock(server_conf_block)
 
-
-    def shutdown_VMs(self):
+    def shutdown_VM(self):
         print("Shutting down VM nodes")
         VM_conf_block = []
-        self._generateVMS('stopping',10,VM_conf_block)
+        VM_conf_block.append(self._generateVM('stopping', 10))
 
         self._sendInfluxDataBlock(VM_conf_block)
 
-    def _generateVMS(self,state, amount, datablock):
-        for i in range(0, amount):
-            datablock.append(lp._generateVMConfig(state, 1, '100G', '1T', self._selectDelay(amount)))
-
-    def iterateService( self ):
+    def iterateService(self):
         # The simulation will run through 'X' iterations of the simulation
         # each time this method is called. This allows request/response messages to be
         # batched and sent to the InfluxDB in sensible sized blocks
-        return self._executeServiceIteration( dc.ITERATION_STRIDE )
+        return self._executeServiceIteration(dc.ITERATION_STRIDE)
 
-# 'Private' methods ________________________________________________________
-    def _executeServiceIteration( self, count ):
+    def _executeServiceIteration(self, count):
 
         requestBlock = []
         responseBlock = []
@@ -179,11 +230,11 @@ class DemoServer(object):
         totalDifference = sumOfclientQuality = percentageDifference = 0
 
         # Keep going until this stride (count) completes
-        while ( count > 0 ):
+        while (count > 0):
             count -= 1
 
             # Check we have some iterations to do
-            if ( self.simIterations > 0 ):
+            if (self.simIterations > 0):
                 # First record clients that request segments
                 clientsRequesting = []
 
@@ -192,186 +243,195 @@ class DemoServer(object):
 
                     # Record request, if it was generated
                     cReq = client.iterateRequest()
-                    if ( cReq != None ):
-
-                        clientsRequesting.append( client )
-                        requestBlock.append( lp._generateClientRequest(cReq, self.id, self.currentTime) )
-
-
+                    if cReq is not None:
+                        clientsRequesting.append(client)
+                        requestBlock.append(lp._generateClientRequest(cReq, self.id, self.currentTime))
 
                 # Now generate request statistics
-                clientReqCount = len( clientsRequesting )
+                clientReqCount = len(clientsRequesting)
 
                 # Create a single CPU usage metric for this iteration
-                cpuUsagePercentage =self._cpuUsage(clientReqCount)
+                cpuUsagePercentage = self._cpuUsage(clientReqCount)
 
                 # Now generate responses, based on stats
                 for client in clientsRequesting:
-
                     # Generate some quality and delays based on the number of clients requesting for this iteration
-                    qualitySelect = self._selectQuality( client.getQuality(), clientReqCount )
-                    delaySelect = self._selectDelay( clientReqCount ) + self.currentTime
+                    qualitySelect = self._selectQuality(client.getQuality(), clientReqCount)
+                    delaySelect = self._selectDelay(clientReqCount) + self.currentTime
                     qualityDifference = client.getQuality() - qualitySelect
-                    totalDifference+=qualityDifference
+                    totalDifference += qualityDifference
                     # print('totalDifference = ' + str(totalDifference) +'\n')
-                    sumOfclientQuality+=client.getQuality()
+                    sumOfclientQuality += client.getQuality()
                     # print('sumOfclientQuality = ' + str(sumOfclientQuality) + '\n')
-                    percentageDifference=int((totalDifference*100)/sumOfclientQuality)
+                    percentageDifference = int((totalDifference * 100) / sumOfclientQuality)
                     # print('percentageOfQualityDifference = ' + str(percentageDifference) + '%')
- 
+
                     responseBlock.append(lp._generateServerResponse(client.getLastRequestID(), qualitySelect,
-                                                                      delaySelect, cpuUsagePercentage,
-                                                                      percentageDifference))
+                                                                    delaySelect, cpuUsagePercentage,
+                                                                    percentageDifference))
                     SFBlock.append(lp._generateMpegDashReport('https://netflix.com/scream', qualitySelect, delaySelect))
 
-
                     networkBlock.append(lp._generateNetworkReport(sumOfclientQuality, delaySelect))
                 # Iterate the service simulation
                 self.simIterations -= 1
-                self.currentTime += 1000 # advance 1 second
+                self.currentTime += 1000  # advance 1 second
 
         # If we have some requests/responses to send to InfluxDB, do it
-        if ( len(requestBlock) > 0 and len(responseBlock) > 0 ):
-            self._sendInfluxDataBlock( requestBlock )
-            self._sendInfluxDataBlock( responseBlock )
-            self._sendInfluxDataBlock( networkBlock )
-            self._sendInfluxDataBlock( SFBlock )
+        if (len(requestBlock) > 0 and len(responseBlock) > 0):
+            self._sendInfluxDataBlock(requestBlock)
+            self._sendInfluxDataBlock(responseBlock)
+            self._sendInfluxDataBlock(networkBlock)
+            self._sendInfluxDataBlock(SFBlock)
             print("Sending influx data blocks")
 
         return self.simIterations
 
+    def _generateVM(self, state, delay):
+        return lp.generate_vm_config(state, 1, '100G', '1T', self._selectDelay(delay))
+
+    # 'Private' methods ________________________________________________________
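+    # Emits the initial configuration reports for this server: VM state ('starting'
+    # then 'running'), host resources, a port config and the MPEG-DASH service
+    # function state ('starting' then 'running').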
+    def _configure(self, server_id, server_location):
+        print("Configuring")
+        self.configure_VM('starting')
+        self.configure_VM('running')
+        #time.sleep(0.1)
+        self.configure_server(server_id, server_location)
+        self._sendInfluxData(lp._configure_port('01', 'running', '1GB/s', self.currentTime))
+        self._sendInfluxData(lp._configure_service_function('starting', 100))
+        #time.sleep(0.1)
+        self._sendInfluxData(lp._configure_service_function('running', 100))
+
     def _cpuUsage(self, clientCount):
-        cpuUsage=randint(0, 10)
+        cpuUsage = randint(0, 10)
 
-        if ( clientCount < 20 ):
+        if (clientCount < 20):
             cpuUsage += 5
-        elif ( clientCount >= 20 and clientCount <40 ):
+        elif (clientCount >= 20 and clientCount < 40):
             cpuUsage += 10
-        elif ( clientCount >= 40 and clientCount <60 ):
+        elif (clientCount >= 40 and clientCount < 60):
             cpuUsage += 15
-        elif ( clientCount >= 60 and clientCount <80 ):
+        elif (clientCount >= 60 and clientCount < 80):
             cpuUsage += 20
-        elif ( clientCount >= 80 and clientCount <110 ):
+        elif (clientCount >= 80 and clientCount < 110):
             cpuUsage += 30
-        elif ( clientCount >= 110 and clientCount <150 ):
+        elif (clientCount >= 110 and clientCount < 150):
             cpuUsage += 40
-        elif ( clientCount >= 150 and clientCount <200 ):
+        elif (clientCount >= 150 and clientCount < 200):
             cpuUsage += 55
-        elif ( clientCount >= 200 and clientCount <300 ):
+        elif (clientCount >= 200 and clientCount < 300):
             cpuUsage += 70
-        elif ( clientCount >= 300 ):
+        elif (clientCount >= 300):
             cpuUsage += 90
 
         return cpuUsage
 
-
     # Rule to determine a response quality, based on the current number of clients requesting
-    def _selectQuality( self, expectedQuality, clientCount ):
+    def _selectQuality(self, expectedQuality, clientCount):
 
         result = dc.MAX_QUALITY
 
-        if ( clientCount < 50 ):
+        if (clientCount < 50):
             result = 8
-        elif ( clientCount >= 50 and clientCount < 100 ):
+        elif (clientCount >= 50 and clientCount < 100):
             result = 7
-        elif ( clientCount >= 100 and clientCount < 150 ):
+        elif (clientCount >= 100 and clientCount < 150):
             result = 6
-        elif ( clientCount >= 150 and clientCount < 200 ):
+        elif (clientCount >= 150 and clientCount < 200):
             result = 5
-        elif ( clientCount >= 200 and clientCount < 250 ):
+        elif (clientCount >= 200 and clientCount < 250):
             result = 4
-        elif ( clientCount >= 250 and clientCount < 300 ):
+        elif (clientCount >= 250 and clientCount < 300):
             result = 3
-        elif ( clientCount >= 300 ):
+        elif (clientCount >= 300):
             result = 2
 
         # Give the client what it wants if possible
-        if ( result > expectedQuality ):
+        if (result > expectedQuality):
             result = expectedQuality
 
         return result
 
     # Rule to determine a delay, based on the current number of clients requesting
-    def _selectDelay( self, cCount ):
+    def _selectDelay(self, cCount):
 
         result = dc.MIN_SERV_RESP_TIME
 
-        if ( cCount < 50 ):
+        if (cCount < 50):
             result = 150
-        elif ( cCount >= 50 and cCount < 100 ):
+        elif (cCount >= 50 and cCount < 100):
             result = 200
-        elif ( cCount > 100 and cCount < 200 ):
+        elif (cCount > 100 and cCount < 200):
             result = 500
-        elif ( cCount >= 200 ):
+        elif (cCount >= 200):
             result = 1000
 
         # Perturb the delay a bit
-        result += randint( 0, 20 )
+        result += randint(0, 20)
 
         return result
 
-
-    def _createDB( self ):
-        self._sendInfluxQuery( 'CREATE DATABASE '+  self.influxDB )
-
-    def _deleteDB( self, dbName ):
-        self._sendInfluxQuery( 'DROP DATABASE ' + self.influxDB )
-
     # InfluxDB data send methods
     # -----------------------------------------------------------------------------------------------
-    def _sendInfluxQuery( self, query ):
-        query = urllib.parse.urlencode( {'q' : query} )
-        query = query.encode('ascii')
-        req = urllib.request.Request( self.influxURL + '/query ', query )
-        urllib.request.urlopen( req )
 
-    def _sendInfluxData( self, data ):
+    def _sendInfluxData(self, data):
         data = data.encode()
-        header = {'Content-Type': 'application/octet-stream' }
-        req = urllib.request.Request( self.influxURL + '/write?db=' +self.influxDB, data, header )
-        urllib.request.urlopen( req )
+        header = {'Content-Type': 'application/octet-stream'}
+        req = urllib.request.Request(self.influxURL + '/write?db=' + self.influxDB, data, header)
+        urllib.request.urlopen(req)
 
-    def _sendInfluxDataBlock( self, dataBlock ):
+    def _sendInfluxDataBlock(self, dataBlock):
         msg = ''
         for stmt in dataBlock:
             msg += stmt + '\n'
 
         try:
-            if ( dc.LOG_DATA == True ):
-                print( msg )
+            if dc.LOG_DATA:
+                print(msg)
 
-            self._sendInfluxData( msg )
+            self._sendInfluxData(msg)
 
         except urllib.error.HTTPError as ex:
-            print ( "Error calling: " + str( ex.url) + "..." + str(ex.msg) )
+            print("Error calling: " + str(ex.url) + "..." + str(ex.msg))
 
 
 # Entry point
 # -----------------------------------------------------------------------------------------------
-print( "Preparing simulation" )
-clients    = 10
+print("Preparing simulation")
+# Iterations is time in seconds for each server to simulate
 iterations = 3000
 # port 8086: Direct to DB specified
 # port 8186: To telegraf, telegraf specifies DB
-demoServer = DemoServer(clients, iterations, 'http://localhost:8186', 'testDB')
-
+start_time = time.time()
+database_manager = DatabaseManager('http://localhost:8186', 'testDB')
 # Set up InfluxDB (need to wait a little while)
-demoServer.destroyDatabase()
+database_manager.database_teardown()
 time.sleep(2)
-demoServer.prepareDatabase()
+database_manager.database_up()
 time.sleep(2)
-demoServer.reportStatus()
-demoServer.configure_servers()
-demoServer.configure_VMs()
+# configure servers
+demoServer_southampton = DemoServer(iterations, 'http://localhost:8186', 'testDB', "Server1", "Southampton")
+demoServer_bristol = DemoServer(iterations, 'http://localhost:8186', 'testDB', "Server2", "Bristol")
+telegraf_node = Node('http://localhost:8186', 'testDB', True)
+server_list = [demoServer_southampton, demoServer_bristol]
+client_manager = ClientManager(server_list)
+client_manager.generate_new_clients(20)
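+# The 20 clients are assigned round-robin across the two servers (10 each)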
+
 # Start simulation
-print( "Starting simulation" )
-while ( True ):
-    itCount = demoServer.iterateService()
-    pcDone = round ( (itCount/ iterations) * 100 )
+print("Starting simulation")
+while True:
+    for server in server_list:
+        itCount = server.iterateService()
+    telegraf_node.iterateService()
+    pcDone = round((itCount / iterations) * 100)
 
-    print( "Simulation remaining (%): " + str( pcDone ) +" \r", end='' )
+    print("Simulation remaining (%): " + str(pcDone) + " \r", end='')
 
-    if ( itCount == 0 ):
+    if itCount == 0:
         break
-demoServer.shutdown_VMs()
-print("\nFinished")
\ No newline at end of file
+
+for server in server_list:
+    server.shutdown()
+print("\nFinished")
+end_time = time.time()
+print("Started at {0}, ended at {1}, total run time {2:.1f}s".format(start_time, end_time, end_time - start_time))
+
diff --git a/src/mediaServiceSim/simulator_v2.py b/src/mediaServiceSim/simulator_v2.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f06f9dc2ce226ff8d7956afc5cfaadf33791bc0
--- /dev/null
+++ b/src/mediaServiceSim/simulator_v2.py
@@ -0,0 +1,91 @@
+import LineProtocolGenerator as lp
+import time
+import urllib.parse
+import urllib.request
+from random import random, randint
+
+# Simulator for services
+class sim:
+    def __init__(self, influx_url):
+        # requests per second for different quality levels
+        self.quality_request_rate = {"DC1": [10, 20, 10], "DC2": [5, 30, 5]}
+
+        # The target database is fixed to the CLMC metrics database used by the Telegraf agents
+        self.influx_db = 'CLMCMetrics'
+        self.influx_url = influx_url
+        # Teardown DB from previous sim and bring it back up
+        self._deleteDB()
+        self._createDB()
+
+
+    def run(self, simulation_length_seconds):
+        start_time = time.time()
+        current_time = int(round(time.time() * 1000))  # milliseconds; _getNSTime converts ms to ns
+        surrogate_services = [{'agent_url': 'http://192.168.50.11:8186', 'location': 'DC1', 'cpu': 2,
+                                'mem': '8GB', 'storage': '1TB'},
+                              {'agent_url': 'http://192.168.50.12:8186', 'location': 'DC2', 'cpu': 4, 
+                                'mem': '8GB', 'storage': '1TB'}
+                              ]
+        # Simulate surrogate services being asserted
+        for service in surrogate_services:
+            self._sendInfluxData(service['agent_url'], lp.generate_vm_config('starting', service['cpu'], service['mem'], service['storage'], current_time))
+        for service in surrogate_services:
+            self._sendInfluxData(service['agent_url'], lp.generate_vm_config('running', service['cpu'], service['mem'], service['storage'], current_time))
+
+        # Run simulation
+        for i in range(simulation_length_seconds):
+            for service in surrogate_services:
+                
+                # Scale CPU usage on number of requests, quality and cpu allocation
+                cpu_usage = self.quality_request_rate[service['location']][0]
+                cpu_usage += self.quality_request_rate[service['location']][1]*2
+                cpu_usage += self.quality_request_rate[service['location']][2]*4
+                cpu_usage = cpu_usage/service['cpu']
+                cpu_usage = cpu_usage/100 # Transform into %
+                self._sendInfluxData(service['agent_url'], lp.generate_cpu_report(cpu_usage, cpu_usage, current_time))
+                
+                # Scale SENT/REC bytes on requests and quality
+                total_bytes = self.quality_request_rate[service['location']][0]
+                total_bytes += self.quality_request_rate[service['location']][1]*2
+                total_bytes += self.quality_request_rate[service['location']][2]*4
+                bytes_sent = 1024*total_bytes
+                bytes_rec = 32*total_bytes
+                self._sendInfluxData(service['agent_url'], lp.generate_network_report(bytes_rec, bytes_sent, current_time))
+
+                # Scale MPEG Dash on requests, quality, cpu usage
+                avg_response_time = randint(0, 5 * self.quality_request_rate[service['location']][0])
+                avg_response_time += randint(0, 10 * self.quality_request_rate[service['location']][1])
+                avg_response_time += randint(0, 15 * self.quality_request_rate[service['location']][2])
+                avg_response_time *= cpu_usage
+                peak_response_time = avg_response_time + randint(30, 60)
+                requests = sum(self.quality_request_rate[service['location']])
+                self._sendInfluxData(service['agent_url'], lp.generate_mpegdash_report('https://Netflix.com/scream', requests, avg_response_time, peak_response_time, current_time))
+            # Add a second to the clock
+            current_time += 1000
+        end_time = time.time()
+        print("Simulation Finished. Start time {0}. End time {1}. Total time {2}".format(start_time,end_time,end_time-start_time))
+
+    def _createDB(self):
+        self._sendInfluxQuery(self.influx_url, 'CREATE DATABASE ' + self.influx_db)
+
+
+    def _deleteDB(self):
+        self._sendInfluxQuery(self.influx_url, 'DROP DATABASE ' + self.influx_db)
+
+
+    def _sendInfluxQuery(self, url, query):
+        query = urllib.parse.urlencode({'q': query})
+        query = query.encode('ascii')
+        req = urllib.request.Request(url + '/query', query)
+        urllib.request.urlopen(req)
+
+    def _sendInfluxData(self, url, data):
+        data = data.encode()
+        header = {'Content-Type': 'application/octet-stream'}
+        req = urllib.request.Request(url + '/write?db=' + self.influx_db, data, header)
+        urllib.request.urlopen(req)
+
+
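+# Run a 3-minute simulation: reports are posted to the Telegraf agents on the
+# ipendpoint VMs (192.168.50.11/12:8186), which forward them to InfluxDB on the
+# CLMC VM (192.168.50.10:8086).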
+simulator = sim('http://192.168.50.10:8086')
+simulator.run(180)
+
diff --git a/ubuntu-xenial-16.04-cloudimg-console.log b/ubuntu-xenial-16.04-cloudimg-console.log
index 292367aa66bf0e98b3c408f7fd136222f9371ee9..f3285fb6e6cf0d7ec2def8dceb4ebecf988d9e32 100644
--- a/ubuntu-xenial-16.04-cloudimg-console.log
+++ b/ubuntu-xenial-16.04-cloudimg-console.log
@@ -47,7 +47,7 @@
 [    0.000000] NODE_DATA(0) allocated [mem 0x7ffeb000-0x7ffeffff]
 [    0.000000] kvm-clock: Using msrs 4b564d01 and 4b564d00
 [    0.000000] kvm-clock: cpu 0, msr 0:7ffe3001, primary cpu clock
-[    0.000000] kvm-clock: using sched offset of 3405810368 cycles
+[    0.000000] kvm-clock: using sched offset of 3484004824 cycles
 [    0.000000] clocksource: kvm-clock: mask: 0xffffffffffffffff max_cycles: 0x1cd42e4dffb, max_idle_ns: 881590591483 ns
 [    0.000000] Zone ranges:
 [    0.000000]   DMA      [mem 0x0000000000001000-0x0000000000ffffff]
@@ -90,419 +90,410 @@
 [    0.000000] console [tty1] enabled
 [    0.000000] console [ttyS0] enabled
 [    0.000000] tsc: Detected 2693.760 MHz processor
-[    0.494244] Calibrating delay loop (skipped) preset value.. 5387.52 BogoMIPS (lpj=10775040)
-[    0.498784] pid_max: default: 32768 minimum: 301
-[    0.502598] ACPI: Core revision 20150930
-[    0.504318] ACPI: 2 ACPI AML tables successfully acquired and loaded
-[    0.510071] Security Framework initialized
-[    0.514773] Yama: becoming mindful.
-[    0.515407] AppArmor: AppArmor initialized
-[    0.523267] Dentry cache hash table entries: 262144 (order: 9, 2097152 bytes)
-[    0.574198] Inode-cache hash table entries: 131072 (order: 8, 1048576 bytes)
-[    0.609933] Mount-cache hash table entries: 4096 (order: 3, 32768 bytes)
-[    0.621732] Mountpoint-cache hash table entries: 4096 (order: 3, 32768 bytes)
-[    0.643707] Initializing cgroup subsys io
-[    0.644399] Initializing cgroup subsys memory
-[    0.657504] Initializing cgroup subsys devices
-[    0.659472] Initializing cgroup subsys freezer
-[    0.660207] Initializing cgroup subsys net_cls
-[    0.660945] Initializing cgroup subsys perf_event
-[    0.661703] Initializing cgroup subsys net_prio
-[    0.663656] Initializing cgroup subsys hugetlb
-[    0.664385] Initializing cgroup subsys pids
-[    0.665172] CPU: Physical Processor ID: 0
-[    0.666812] mce: CPU supports 0 MCE banks
-[    0.670142] process: using mwait in idle threads
-[    0.679084] Last level iTLB entries: 4KB 1024, 2MB 1024, 4MB 1024
-[    0.680016] Last level dTLB entries: 4KB 1024, 2MB 1024, 4MB 1024, 1GB 4
-[    0.694999] Freeing SMP alternatives memory: 32K
-[    0.733929] ftrace: allocating 32154 entries in 126 pages
-[    0.779978] smpboot: APIC(0) Converting physical 0 to logical package 0
-[    0.863130] smpboot: Max logical packages: 1
-[    0.964479] x2apic enabled
-[    0.975116] Switched APIC routing to physical x2apic.
-[    1.004902] ..TIMER: vector=0x30 apic1=0 pin1=2 apic2=-1 pin2=-1
-[    1.162641] APIC calibration not consistent with PM-Timer: 101ms instead of 100ms
-[    1.179445] APIC delta adjusted to PM-Timer: 6248954 (6314752)
-[    1.186654] smpboot: CPU0: Intel(R) Core(TM) i7-4600U CPU @ 2.10GHz (family: 0x6, model: 0x45, stepping: 0x1)
-[    1.203662] Performance Events: unsupported p6 CPU model 69 no PMU driver, software events only.
-[    1.226663] KVM setup paravirtual spinlock
-[    1.231339] x86: Booted up 1 node, 1 CPUs
-[    1.233337] smpboot: Total of 1 processors activated (5387.52 BogoMIPS)
-[    1.238857] devtmpfs: initialized
-[    1.252438] evm: security.selinux
-[    1.257764] evm: security.SMACK64
-[    1.260100] evm: security.SMACK64EXEC
-[    1.262067] evm: security.SMACK64TRANSMUTE
-[    1.264085] evm: security.SMACK64MMAP
-[    1.270710] evm: security.ima
-[    1.271503] evm: security.capability
-[    1.277593] clocksource: jiffies: mask: 0xffffffff max_cycles: 0xffffffff, max_idle_ns: 7645041785100000 ns
-[    1.280721] futex hash table entries: 256 (order: 2, 16384 bytes)
-[    1.287075] pinctrl core: initialized pinctrl subsystem
-[    1.293181] RTC time: 16:13:53, date: 01/05/18
-[    1.295141] NET: Registered protocol family 16
-[    1.296019] cpuidle: using governor ladder
-[    1.296711] cpuidle: using governor menu
-[    1.297376] PCCT header not found.
-[    1.299905] ACPI: bus type PCI registered
-[    1.300592] acpiphp: ACPI Hot Plug PCI Controller Driver version: 0.5
-[    1.301642] PCI: Using configuration type 1 for base access
-[    1.306175] ACPI: Added _OSI(Module Device)
-[    1.308833] ACPI: Added _OSI(Processor Device)
-[    1.309563] ACPI: Added _OSI(3.0 _SCP Extensions)
-[    1.310335] ACPI: Added _OSI(Processor Aggregator Device)
-[    1.315519] ACPI: Executed 1 blocks of module-level executable AML code
-[    1.320494] ACPI: Interpreter enabled
-[    1.327400] ACPI: (supports S0 S5)
-[    1.329163] ACPI: Using IOAPIC for interrupt routing
-[    1.330089] PCI: Using host bridge windows from ACPI; if necessary, use "pci=nocrs" and report a bug
-[    1.334344] ACPI: PCI Root Bridge [PCI0] (domain 0000 [bus 00-ff])
-[    1.337184] acpi PNP0A03:00: _OSC: OS supports [ASPM ClockPM Segments MSI]
-[    1.341713] acpi PNP0A03:00: _OSC: not requesting OS control; OS requires [ExtendedConfig ASPM ClockPM MSI]
-[    1.347957] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended PCI configuration space under this bridge.
-[    1.358394] PCI host bridge to bus 0000:00
-[    1.359087] pci_bus 0000:00: root bus resource [io  0x0000-0x0cf7 window]
-[    1.363046] pci_bus 0000:00: root bus resource [io  0x0d00-0xffff window]
-[    1.367809] pci_bus 0000:00: root bus resource [mem 0x000a0000-0x000bffff window]
-[    1.369064] pci_bus 0000:00: root bus resource [mem 0x80000000-0xffdfffff window]
-[    1.375739] pci_bus 0000:00: root bus resource [bus 00-ff]
-[    1.380063] pci 0000:00:01.1: legacy IDE quirk: reg 0x10: [io  0x01f0-0x01f7]
-[    1.381099] pci 0000:00:01.1: legacy IDE quirk: reg 0x14: [io  0x03f6]
-[    1.385587] pci 0000:00:01.1: legacy IDE quirk: reg 0x18: [io  0x0170-0x0177]
-[    1.390118] pci 0000:00:01.1: legacy IDE quirk: reg 0x1c: [io  0x0376]
-[    1.423192] pci 0000:00:07.0: quirk: [io  0x4000-0x403f] claimed by PIIX4 ACPI
-[    1.504419] pci 0000:00:07.0: quirk: [io  0x4100-0x410f] claimed by PIIX4 SMB
-[    1.568792] ACPI: PCI Interrupt Link [LNKA] (IRQs 5 9 10 *11)
-[    1.608020] ACPI: PCI Interrupt Link [LNKB] (IRQs 5 9 10 *11)
-[    1.634324] ACPI: PCI Interrupt Link [LNKC] (IRQs 5 9 *10 11)
-[    1.640037] ACPI: PCI Interrupt Link [LNKD] (IRQs 5 *9 10 11)
-[    1.641362] ACPI: Enabled 2 GPEs in block 00 to 07
-[    1.642398] vgaarb: setting as boot device: PCI:0000:00:02.0
-[    1.646513] vgaarb: device added: PCI:0000:00:02.0,decodes=io+mem,owns=io+mem,locks=none
-[    1.647866] vgaarb: loaded
-[    1.648405] vgaarb: bridge control possible 0000:00:02.0
-[    1.650701] SCSI subsystem initialized
-[    1.659917] ACPI: bus type USB registered
-[    1.691614] usbcore: registered new interface driver usbfs
-[    1.694468] usbcore: registered new interface driver hub
-[    1.697431] usbcore: registered new device driver usb
-[    1.698362] PCI: Using ACPI for IRQ routing
-[    1.699326] NetLabel: Initializing
-[    1.699951] NetLabel:  domain hash size = 128
-[    1.702363] NetLabel:  protocols = UNLABELED CIPSOv4
-[    1.725962] NetLabel:  unlabeled traffic allowed by default
-[    1.726923] amd_nb: Cannot enumerate AMD northbridges
-[    1.729743] clocksource: Switched to clocksource kvm-clock
-[    1.748824] AppArmor: AppArmor Filesystem Enabled
-[    1.787045] pnp: PnP ACPI init
-[    1.809799] pnp: PnP ACPI: found 3 devices
-[    1.815879] clocksource: acpi_pm: mask: 0xffffff max_cycles: 0xffffff, max_idle_ns: 2085701024 ns
-[    1.837990] NET: Registered protocol family 2
-[    1.856551] TCP established hash table entries: 16384 (order: 5, 131072 bytes)
-[    1.859418] TCP bind hash table entries: 16384 (order: 6, 262144 bytes)
-[    1.868178] TCP: Hash tables configured (established 16384 bind 16384)
-[    1.873152] UDP hash table entries: 1024 (order: 3, 32768 bytes)
-[    1.876278] UDP-Lite hash table entries: 1024 (order: 3, 32768 bytes)
-[    1.877457] NET: Registered protocol family 1
-[    1.878338] pci 0000:00:00.0: Limiting direct PCI/PCI transfers
-[    1.879429] pci 0000:00:01.0: Activating ISA DMA hang workarounds
-[    1.880627] Unpacking initramfs...
-[    4.554673] Freeing initrd memory: 10836K
-[    4.583615] RAPL PMU detected, API unit is 2^-32 Joules, 4 fixed counters 10737418240 ms ovfl timer
-[    4.587999] hw unit of domain pp0-core 2^-0 Joules
-[    4.592007] hw unit of domain package 2^-0 Joules
-[    4.600792] hw unit of domain dram 2^-0 Joules
-[    4.614895] hw unit of domain pp1-gpu 2^-0 Joules
-[    4.616120] platform rtc_cmos: registered platform RTC device (no PNP device found)
-[    4.625557] Scanning for low memory corruption every 60 seconds
-[    4.663947] audit: initializing netlink subsys (disabled)
-[    4.678675] audit: type=2000 audit(1515168841.362:1): initialized
-[    4.699945] Initialise system trusted keyring
-[    4.707459] HugeTLB registered 2 MB page size, pre-allocated 0 pages
-[    4.712274] zbud: loaded
-[    4.717418] VFS: Disk quotas dquot_6.6.0
-[    4.727405] VFS: Dquot-cache hash table entries: 512 (order 0, 4096 bytes)
-[    4.729285] squashfs: version 4.0 (2009/01/31) Phillip Lougher
-[    4.746371] fuse init (API version 7.23)
-[    4.750615] Key type big_key registered
-[    4.756013] Allocating IMA MOK and blacklist keyrings.
-[    4.757477] Key type asymmetric registered
-[    4.760093] Asymmetric key parser 'x509' registered
-[    4.764697] Block layer SCSI generic (bsg) driver version 0.4 loaded (major 249)
-[    4.774892] io scheduler noop registered
-[    4.775885] io scheduler deadline registered (default)
-[    4.785243] io scheduler cfq registered
-[    4.787583] pci_hotplug: PCI Hot Plug PCI Core version: 0.5
-[    4.790390] pciehp: PCI Express Hot Plug Controller Driver version: 0.4
-[    4.793144] ACPI: AC Adapter [AC] (on-line)
-[    4.797577] input: Power Button as /devices/LNXSYSTM:00/LNXPWRBN:00/input/input0
-[    4.799512] ACPI: Power Button [PWRF]
-[    4.803911] input: Sleep Button as /devices/LNXSYSTM:00/LNXSLPBN:00/input/input1
-[    4.811064] ACPI: Sleep Button [SLPF]
-[    4.816061] ACPI: Battery Slot [BAT0] (battery present)
-[    4.822228] GHES: HEST is not enabled!
-[    4.830069] Serial: 8250/16550 driver, 32 ports, IRQ sharing enabled
-[    4.870932] 00:02: ttyS0 at I/O 0x3f8 (irq = 4, base_baud = 115200) is a 16550A
-[    4.884823] Linux agpgart interface v0.103
-[    4.903559] loop: module loaded
-[    4.914996] scsi host0: ata_piix
-[    4.922473] scsi host1: ata_piix
-[    4.923381] ata1: PATA max UDMA/33 cmd 0x1f0 ctl 0x3f6 bmdma 0xd000 irq 14
-[    4.926760] ata2: PATA max UDMA/33 cmd 0x170 ctl 0x376 bmdma 0xd008 irq 15
-[    4.941635] libphy: Fixed MDIO Bus: probed
-[    4.947102] tun: Universal TUN/TAP device driver, 1.6
-[    4.964840] tun: (C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>
-[    4.967773] PPP generic driver version 2.4.2
-[    4.971834] ehci_hcd: USB 2.0 'Enhanced' Host Controller (EHCI) Driver
-[    4.990452] ehci-pci: EHCI PCI platform driver
-[    4.991625] ehci-platform: EHCI generic platform driver
-[    4.999333] ohci_hcd: USB 1.1 'Open' Host Controller (OHCI) Driver
-[    5.028855] ohci-pci: OHCI PCI platform driver
-[    5.038359] ohci-platform: OHCI generic platform driver
-[    5.044054] uhci_hcd: USB Universal Host Controller Interface driver
-[    5.050043] i8042: PNP: PS/2 Controller [PNP0303:PS2K,PNP0f03:PS2M] at 0x60,0x64 irq 1,12
-[    5.069853] serio: i8042 KBD port at 0x60,0x64 irq 1
-[    5.072974] serio: i8042 AUX port at 0x60,0x64 irq 12
-[    5.078356] mousedev: PS/2 mouse device common for all mice
-[    5.090345] input: AT Translated Set 2 keyboard as /devices/platform/i8042/serio0/input/input2
-[    5.098935] rtc_cmos rtc_cmos: rtc core: registered rtc_cmos as rtc0
-[    5.108384] rtc_cmos rtc_cmos: alarms up to one day, 114 bytes nvram
-[    5.111234] i2c /dev entries driver
-[    5.112265] device-mapper: uevent: version 1.0.3
-[    5.124019] device-mapper: ioctl: 4.34.0-ioctl (2015-10-28) initialised: dm-devel@redhat.com
-[    5.130853] ledtrig-cpu: registered to indicate activity on CPUs
-[    5.135780] NET: Registered protocol family 10
-[    5.144249] NET: Registered protocol family 17
-[    5.146948] Key type dns_resolver registered
-[    5.149465] microcode: CPU0 sig=0x40651, pf=0x40, revision=0x0
-[    5.150911] microcode: Microcode Update Driver: v2.01 <tigran@aivazian.fsnet.co.uk>, Peter Oruba
-[    5.155915] registered taskstats version 1
-[    5.156952] Loading compiled-in X.509 certificates
-[    5.159059] Loaded X.509 cert 'Build time autogenerated kernel key: 7431eaeda5a51458aeb00f8de0f18f89e178d882'
-[    5.178945] zswap: loaded using pool lzo/zbud
-[    5.184498] Key type trusted registered
-[    5.195240] Key type encrypted registered
-[    5.203650] AppArmor: AppArmor sha1 policy hashing enabled
-[    5.227584] ima: No TPM chip found, activating TPM-bypass!
-[    5.228863] evm: HMAC attrs: 0x1
-[    5.230070]   Magic number: 2:209:236
-[    5.232575] rtc_cmos rtc_cmos: hash matches
-[    5.240314] rtc_cmos rtc_cmos: setting system clock to 2018-01-05 16:13:57 UTC (1515168837)
-[    5.243930] BIOS EDD facility v0.16 2004-Jun-25, 0 devices found
-[    5.245273] EDD information not available.
-[    5.257035] Freeing unused kernel memory: 1492K
-[    5.264822] Write protecting the kernel read-only data: 14336k
-[    5.267084] Freeing unused kernel memory: 1744K
-[    5.285834] Freeing unused kernel memory: 108K
-Loading, please wait...
-starting version[    5.302761] random: udevadm: uninitialized urandom read (16 bytes read, 2 bits of entropy available)
- 229
-[    5.312041] random: systemd-udevd: uninitialized urandom read (16 bytes read, 2 bits of entropy available)
-[    5.328888] random: udevadm: uninitialized urandom read (16 bytes read, 2 bits of entropy available)
-[    5.340831] random: systemd-udevd: uninitialized urandom read (16 bytes read, 2 bits of entropy available)
-[    5.350070] random: systemd-udevd: uninitialized urandom read (16 bytes read, 2 bits of entropy available)
-[    5.378728] random: udevadm: uninitialized urandom read (16 bytes read, 2 bits of entropy available)
-[    5.392327] random: systemd-udevd: uninitialized urandom read (16 bytes read, 2 bits of entropy available)
-[    5.404648] random: udevadm: uninitialized urandom read (16 bytes read, 2 bits of entropy available)
-[    5.409836] random: udevadm: uninitialized urandom read (16 bytes read, 2 bits of entropy available)
-[    5.422705] random: systemd-udevd: uninitialized urandom read (16 bytes read, 2 bits of entropy available)
-[    5.508506] e1000: Intel(R) PRO/1000 Network Driver - version 7.3.21-k8-NAPI
-[    5.522306] e1000: Copyright (c) 1999-2006 Intel Corporation.
-[    5.535939] Fusion MPT base driver 3.04.20
-[    5.556245] Copyright (c) 1999-2008 LSI Corporation
-[    5.613961] tsc: Refined TSC clocksource calibration: 2694.310 MHz
-[    5.615933] clocksource: tsc: mask: 0xffffffffffffffff max_cycles: 0x26d63f842ac, max_idle_ns: 440795296712 ns
-[    5.645199] AVX version of gcm_enc/dec engaged.
-[    5.655495] AES CTR mode by8 optimization enabled
-[    5.694053] Fusion MPT SPI Host driver 3.04.20
-[    5.800000] input: ImExPS/2 Generic Explorer Mouse as /devices/platform/i8042/serio1/input/input4
-[    6.090852] e1000 0000:00:03.0 eth0: (PCI:33MHz:32-bit) 02:0a:1a:84:64:1f
-[    6.110781] e1000 0000:00:03.0 eth0: Intel(R) PRO/1000 Network Connection
-[    6.125413] e1000 0000:00:03.0 enp0s3: renamed from eth0
-[    6.139246] mptbase: ioc0: Initiating bringup
-[    6.218269] ioc0: LSI53C1030 A0: Capabilities={Initiator}
-[    6.403150] scsi host2: ioc0: LSI53C1030 A0, FwRev=00000000h, Ports=1, MaxQ=256, IRQ=20
-[    6.538902] scsi 2:0:0:0: Direct-Access     VBOX     HARDDISK         1.0  PQ: 0 ANSI: 5
-[    6.554967] scsi target2:0:0: Beginning Domain Validation
-[    6.569097] scsi target2:0:0: Domain Validation skipping write tests
-[    6.589181] scsi target2:0:0: Ending Domain Validation
-[    6.597651] scsi target2:0:0: asynchronous
-[    6.606321] scsi 2:0:1:0: Direct-Access     VBOX     HARDDISK         1.0  PQ: 0 ANSI: 5
-[    6.622709] scsi target2:0:1: Beginning Domain Validation
-[    6.624682] scsi target2:0:1: Domain Validation skipping write tests
-[    6.626000] scsi target2:0:1: Ending Domain Validation
-[    6.627242] scsi target2:0:1: asynchronous
-[    6.635032] sd 2:0:0:0: Attached scsi generic sg0 type 0
-[    6.641328] sd 2:0:0:0: [sda] 20971520 512-byte logical blocks: (10.7 GB/10.0 GiB)
-[    6.651849] sd 2:0:1:0: [sdb] 20480 512-byte logical blocks: (10.5 MB/10.0 MiB)
-[    6.673513] sd 2:0:1:0: Attached scsi generic sg1 type 0
-[    6.676372] sd 2:0:0:0: [sda] Write Protect is off
-[    6.691842] sd 2:0:1:0: [sdb] Write Protect is off
-[    6.709704] sd 2:0:0:0: [sda] Incomplete mode parameter data
-[    6.721313] sd 2:0:0:0: [sda] Assuming drive cache: write through
-[    6.728759] sd 2:0:1:0: [sdb] Incomplete mode parameter data
-[    6.736396] sd 2:0:1:0: [sdb] Assuming drive cache: write through
-[    6.748334]  sda: sda1
-[    6.760527] sd 2:0:0:0: [sda] Attached SCSI disk
-[    6.771684] sd 2:0:1:0: [sdb] Attached SCSI disk
-[    8.626266] floppy0: no floppy controllers found
-Begin: Loading e[   10.019133] md: linear personality registered for level -1
-ssential drivers ... [   10.043445] md: multipath personality registered for level -4
-[   10.057136] md: raid0 personality registered for level 0
-[   10.076308] md: raid1 personality registered for level 1
-[   10.158252] raid6: sse2x1   gen()  5875 MB/s
-[   10.233900] raid6: sse2x1   xor()  4786 MB/s
-[   10.314057] raid6: sse2x2   gen()  7989 MB/s
-[   10.386193] raid6: sse2x2   xor()  5354 MB/s
-[   10.462232] raid6: sse2x4   gen()  8185 MB/s
-[   10.537881] raid6: sse2x4   xor()  5928 MB/s
-[   10.551918] raid6: using algorithm sse2x4 gen() 8185 MB/s
-[   10.560765] raid6: .... xor() 5928 MB/s, rmw enabled
-[   10.578265] raid6: using ssse3x2 recovery algorithm
-[   10.587324] xor: automatically using best checksumming function:
-[   10.641860]    avx       : 18427.000 MB/sec
-[   10.655968] async_tx: api initialized (async)
-[   10.683716] md: raid6 personality registered for level 6
-[   10.694983] md: raid5 personality registered for level 5
-[   10.701904] md: raid4 personality registered for level 4
-[   10.718385] md: raid10 personality registered for level 10
+[    0.256791] Calibrating delay loop (skipped) preset value.. 5387.52 BogoMIPS (lpj=10775040)
+[    0.258196] pid_max: default: 32768 minimum: 301
+[    0.282933] ACPI: Core revision 20150930
+[    0.286920] ACPI: 2 ACPI AML tables successfully acquired and loaded
+[    0.289196] Security Framework initialized
+[    0.289897] Yama: becoming mindful.
+[    0.290527] AppArmor: AppArmor initialized
+[    0.293492] Dentry cache hash table entries: 262144 (order: 9, 2097152 bytes)
+[    0.314727] Inode-cache hash table entries: 131072 (order: 8, 1048576 bytes)
+[    0.317712] Mount-cache hash table entries: 4096 (order: 3, 32768 bytes)
+[    0.318745] Mountpoint-cache hash table entries: 4096 (order: 3, 32768 bytes)
+[    0.319964] Initializing cgroup subsys io
+[    0.321960] Initializing cgroup subsys memory
+[    0.323909] Initializing cgroup subsys devices
+[    0.359042] Initializing cgroup subsys freezer
+[    0.364370] Initializing cgroup subsys net_cls
+[    0.365109] Initializing cgroup subsys perf_event
+[    0.365870] Initializing cgroup subsys net_prio
+[    0.368814] Initializing cgroup subsys hugetlb
+[    0.370765] Initializing cgroup subsys pids
+[    0.371550] CPU: Physical Processor ID: 0
+[    0.373101] mce: CPU supports 0 MCE banks
+[    0.373800] process: using mwait in idle threads
+[    0.408750] Last level iTLB entries: 4KB 1024, 2MB 1024, 4MB 1024
+[    0.412110] Last level dTLB entries: 4KB 1024, 2MB 1024, 4MB 1024, 1GB 4
+[    0.426255] Freeing SMP alternatives memory: 32K
+[    0.454155] ftrace: allocating 32154 entries in 126 pages
+[    0.550111] smpboot: APIC(0) Converting physical 0 to logical package 0
+[    0.565678] smpboot: Max logical packages: 1
+[    0.568595] x2apic enabled
+[    0.574172] Switched APIC routing to physical x2apic.
+[    0.576030] ..TIMER: vector=0x30 apic1=0 pin1=2 apic2=-1 pin2=-1
+[    0.686602] smpboot: CPU0: Intel(R) Core(TM) i7-4600U CPU @ 2.10GHz (family: 0x6, model: 0x45, stepping: 0x1)
+[    0.800229] Performance Events: unsupported p6 CPU model 69 no PMU driver, software events only.
+[    0.826382] KVM setup paravirtual spinlock
+[    0.831494] x86: Booted up 1 node, 1 CPUs
+[    0.834413] smpboot: Total of 1 processors activated (5387.52 BogoMIPS)
+[    0.837025] devtmpfs: initialized
+[    0.842382] evm: security.selinux
+[    0.867001] evm: security.SMACK64
+[    0.922199] evm: security.SMACK64EXEC
+[    0.929601] evm: security.SMACK64TRANSMUTE
+[    0.932117] evm: security.SMACK64MMAP
+[    0.932758] evm: security.ima
+[    0.935124] evm: security.capability
+[    0.935899] clocksource: jiffies: mask: 0xffffffff max_cycles: 0xffffffff, max_idle_ns: 7645041785100000 ns
+[    0.938608] futex hash table entries: 256 (order: 2, 16384 bytes)
+[    0.939623] pinctrl core: initialized pinctrl subsystem
+[    0.962321] RTC time: 16:25:05, date: 01/12/18
+[    0.963184] NET: Registered protocol family 16
+[    0.964077] cpuidle: using governor ladder
+[    0.964773] cpuidle: using governor menu
+[    0.975245] PCCT header not found.
+[    0.980197] ACPI: bus type PCI registered
+[    0.982858] acpiphp: ACPI Hot Plug PCI Controller Driver version: 0.5
+[    1.018467] PCI: Using configuration type 1 for base access
+[    1.029753] ACPI: Added _OSI(Module Device)
+[    1.030636] ACPI: Added _OSI(Processor Device)
+[    1.038944] ACPI: Added _OSI(3.0 _SCP Extensions)
+[    1.047032] ACPI: Added _OSI(Processor Aggregator Device)
+[    1.049748] ACPI: Executed 1 blocks of module-level executable AML code
+[    1.054446] ACPI: Interpreter enabled
+[    1.059003] ACPI: (supports S0 S5)
+[    1.059747] ACPI: Using IOAPIC for interrupt routing
+[    1.065892] PCI: Using host bridge windows from ACPI; if necessary, use "pci=nocrs" and report a bug
+[    1.071823] ACPI: PCI Root Bridge [PCI0] (domain 0000 [bus 00-ff])
+[    1.077628] acpi PNP0A03:00: _OSC: OS supports [ASPM ClockPM Segments MSI]
+[    1.086986] acpi PNP0A03:00: _OSC: not requesting OS control; OS requires [ExtendedConfig ASPM ClockPM MSI]
+[    1.088532] acpi PNP0A03:00: fail to add MMCONFIG information, can't access extended PCI configuration space under this bridge.
+[    1.099742] PCI host bridge to bus 0000:00
+[    1.100436] pci_bus 0000:00: root bus resource [io  0x0000-0x0cf7 window]
+[    1.102834] pci_bus 0000:00: root bus resource [io  0x0d00-0xffff window]
+[    1.104287] pci_bus 0000:00: root bus resource [mem 0x000a0000-0x000bffff window]
+[    1.106011] pci_bus 0000:00: root bus resource [mem 0x80000000-0xffdfffff window]
+[    1.110595] pci_bus 0000:00: root bus resource [bus 00-ff]
+[    1.121043] pci 0000:00:01.1: legacy IDE quirk: reg 0x10: [io  0x01f0-0x01f7]
+[    1.140109] pci 0000:00:01.1: legacy IDE quirk: reg 0x14: [io  0x03f6]
+[    1.144990] pci 0000:00:01.1: legacy IDE quirk: reg 0x18: [io  0x0170-0x0177]
+[    1.152251] pci 0000:00:01.1: legacy IDE quirk: reg 0x1c: [io  0x0376]
+[    1.202799] pci 0000:00:07.0: quirk: [io  0x4000-0x403f] claimed by PIIX4 ACPI
+[    1.212606] pci 0000:00:07.0: quirk: [io  0x4100-0x410f] claimed by PIIX4 SMB
+[    1.223401] ACPI: PCI Interrupt Link [LNKA] (IRQs 5 9 10 *11)
+[    1.233243] ACPI: PCI Interrupt Link [LNKB] (IRQs 5 9 10 *11)
+[    1.234523] ACPI: PCI Interrupt Link [LNKC] (IRQs 5 9 *10 11)
+[    1.244416] ACPI: PCI Interrupt Link [LNKD] (IRQs 5 *9 10 11)
+[    1.255721] ACPI: Enabled 2 GPEs in block 00 to 07
+[    1.256733] vgaarb: setting as boot device: PCI:0000:00:02.0
+[    1.257611] vgaarb: device added: PCI:0000:00:02.0,decodes=io+mem,owns=io+mem,locks=none
+[    1.260628] vgaarb: loaded
+[    1.261170] vgaarb: bridge control possible 0000:00:02.0
+[    1.262200] SCSI subsystem initialized
+[    1.268054] ACPI: bus type USB registered
+[    1.269714] usbcore: registered new interface driver usbfs
+[    1.270596] usbcore: registered new interface driver hub
+[    1.283853] usbcore: registered new device driver usb
+[    1.284803] PCI: Using ACPI for IRQ routing
+[    1.321977] NetLabel: Initializing
+[    1.322619] NetLabel:  domain hash size = 128
+[    1.347520] NetLabel:  protocols = UNLABELED CIPSOv4
+[    1.348331] NetLabel:  unlabeled traffic allowed by default
+[    1.349277] amd_nb: Cannot enumerate AMD northbridges
+[    1.357565] clocksource: Switched to clocksource kvm-clock
+[    1.433745] AppArmor: AppArmor Filesystem Enabled
+[    1.473747] pnp: PnP ACPI init
+[    1.508714] pnp: PnP ACPI: found 3 devices
+[    1.553191] clocksource: acpi_pm: mask: 0xffffff max_cycles: 0xffffff, max_idle_ns: 2085701024 ns
+[    1.630159] NET: Registered protocol family 2
+[    1.633259] TCP established hash table entries: 16384 (order: 5, 131072 bytes)
+[    1.648516] TCP bind hash table entries: 16384 (order: 6, 262144 bytes)
+[    1.649531] TCP: Hash tables configured (established 16384 bind 16384)
+[    1.652999] UDP hash table entries: 1024 (order: 3, 32768 bytes)
+[    1.653945] UDP-Lite hash table entries: 1024 (order: 3, 32768 bytes)
+[    1.654958] NET: Registered protocol family 1
+[    1.657145] pci 0000:00:00.0: Limiting direct PCI/PCI transfers
+[    1.661315] pci 0000:00:01.0: Activating ISA DMA hang workarounds
+[    1.662376] Unpacking initramfs...
+[    3.369314] Freeing initrd memory: 10836K
+[    3.376788] RAPL PMU detected, API unit is 2^-32 Joules, 4 fixed counters 10737418240 ms ovfl timer
+[    3.378246] hw unit of domain pp0-core 2^-0 Joules
+[    3.381438] hw unit of domain package 2^-0 Joules
+[    3.384156] hw unit of domain dram 2^-0 Joules
+[    3.384892] hw unit of domain pp1-gpu 2^-0 Joules
+[    3.386835] platform rtc_cmos: registered platform RTC device (no PNP device found)
+[    3.388151] Scanning for low memory corruption every 60 seconds
+[    3.390641] audit: initializing netlink subsys (disabled)
+[    3.395071] audit: type=2000 audit(1515774311.563:1): initialized
+[    3.398467] Initialise system trusted keyring
+[    3.399265] HugeTLB registered 2 MB page size, pre-allocated 0 pages
+[    3.403572] zbud: loaded
+[    3.404239] VFS: Disk quotas dquot_6.6.0
+[    3.404938] VFS: Dquot-cache hash table entries: 512 (order 0, 4096 bytes)
+[    3.410103] squashfs: version 4.0 (2009/01/31) Phillip Lougher
+[    3.412666] fuse init (API version 7.23)
+[    3.413456] Key type big_key registered
+[    3.414158] Allocating IMA MOK and blacklist keyrings.
+[    3.416207] Key type asymmetric registered
+[    3.416903] Asymmetric key parser 'x509' registered
+[    3.419230] Block layer SCSI generic (bsg) driver version 0.4 loaded (major 249)
+[    3.421991] io scheduler noop registered
+[    3.452698] io scheduler deadline registered (default)
+[    3.467401] io scheduler cfq registered
+[    3.522964] pci_hotplug: PCI Hot Plug PCI Core version: 0.5
+[    3.527872] pciehp: PCI Express Hot Plug Controller Driver version: 0.4
+[    3.530951] ACPI: AC Adapter [AC] (on-line)
+[    3.533936] input: Power Button as /devices/LNXSYSTM:00/LNXPWRBN:00/input/input0
+[    3.535185] ACPI: Power Button [PWRF]
+[    3.535930] input: Sleep Button as /devices/LNXSYSTM:00/LNXSLPBN:00/input/input1
+[    3.545128] ACPI: Sleep Button [SLPF]
+[    3.546391] ACPI: Battery Slot [BAT0] (battery present)
+[    3.557045] GHES: HEST is not enabled!
+[    3.558921] Serial: 8250/16550 driver, 32 ports, IRQ sharing enabled
+[    3.581733] 00:02: ttyS0 at I/O 0x3f8 (irq = 4, base_baud = 115200) is a 16550A
+[    3.648605] Linux agpgart interface v0.103
+[    3.659965] loop: module loaded
+[    3.660956] scsi host0: ata_piix
+[    3.661602] scsi host1: ata_piix
+[    3.662239] ata1: PATA max UDMA/33 cmd 0x1f0 ctl 0x3f6 bmdma 0xd000 irq 14
+[    3.672846] ata2: PATA max UDMA/33 cmd 0x170 ctl 0x376 bmdma 0xd008 irq 15
+[    3.696277] libphy: Fixed MDIO Bus: probed
+[    3.696980] tun: Universal TUN/TAP device driver, 1.6
+[    3.699208] tun: (C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>
+[    3.700202] PPP generic driver version 2.4.2
+[    3.700975] ehci_hcd: USB 2.0 'Enhanced' Host Controller (EHCI) Driver
+[    3.715138] ehci-pci: EHCI PCI platform driver
+[    3.715902] ehci-platform: EHCI generic platform driver
+[    3.716737] ohci_hcd: USB 1.1 'Open' Host Controller (OHCI) Driver
+[    3.719562] ohci-pci: OHCI PCI platform driver
+[    3.721806] ohci-platform: OHCI generic platform driver
+[    3.723035] uhci_hcd: USB Universal Host Controller Interface driver
+[    3.738341] i8042: PNP: PS/2 Controller [PNP0303:PS2K,PNP0f03:PS2M] at 0x60,0x64 irq 1,12
+[    3.754527] serio: i8042 KBD port at 0x60,0x64 irq 1
+[    3.759404] serio: i8042 AUX port at 0x60,0x64 irq 12
+[    3.760320] mousedev: PS/2 mouse device common for all mice
+[    3.763173] input: AT Translated Set 2 keyboard as /devices/platform/i8042/serio0/input/input2
+[    3.766228] rtc_cmos rtc_cmos: rtc core: registered rtc_cmos as rtc0
+[    3.797336] rtc_cmos rtc_cmos: alarms up to one day, 114 bytes nvram
+[    3.799578] i2c /dev entries driver
+[    3.800251] device-mapper: uevent: version 1.0.3
+[    3.801080] device-mapper: ioctl: 4.34.0-ioctl (2015-10-28) initialised: dm-devel@redhat.com
+[    3.823181] ledtrig-cpu: registered to indicate activity on CPUs
+[    3.833314] NET: Registered protocol family 10
+[    3.834209] NET: Registered protocol family 17
+[    3.837433] Key type dns_resolver registered
+[    3.846738] microcode: CPU0 sig=0x40651, pf=0x40, revision=0x0
+[    3.878877] microcode: Microcode Update Driver: v2.01 <tigran@aivazian.fsnet.co.uk>, Peter Oruba
+[    3.989751] registered taskstats version 1
+[    4.014883] Loading compiled-in X.509 certificates
+[    4.017715] Loaded X.509 cert 'Build time autogenerated kernel key: 7431eaeda5a51458aeb00f8de0f18f89e178d882'
+[    4.034521] zswap: loaded using pool lzo/zbud
+[    4.036304] Key type trusted registered
+[    4.038915] Key type encrypted registered
+[    4.055519] AppArmor: AppArmor sha1 policy hashing enabled
+[    4.108375] ima: No TPM chip found, activating TPM-bypass!
+[    4.128069] evm: HMAC attrs: 0x1
+[    4.131780]   Magic number: 2:940:438
+[    4.134397] rtc_cmos rtc_cmos: setting system clock to 2018-01-12 16:25:08 UTC (1515774308)
+[    4.137283] BIOS EDD facility v0.16 2004-Jun-25, 0 devices found
+[    4.174914] EDD information not available.
+[    4.219733] Freeing unused kernel memory: 1492K
+[    4.243669] Write protecting the kernel read-only data: 14336k
+[    4.244914] Freeing unused kernel memory: 1744K
+[    4.247109] Freeing unused kernel memory: 108K
+Loading, please [    4.275021] random: udevadm: uninitialized urandom read (16 bytes read, 2 bits of entropy available)
+wait...
+startin[    4.363784] random: systemd-udevd: uninitialized urandom read (16 bytes read, 2 bits of entropy available)
+g version 229
+[    4.389480] tsc: Refined TSC clocksource calibration: 2693.194 MHz
+[    4.401943] clocksource: tsc: mask: 0xffffffffffffffff max_cycles: 0x26d220f3262, max_idle_ns: 440795283780 ns
+[    4.416982] random: systemd-udevd: uninitialized urandom read (16 bytes read, 2 bits of entropy available)
+[    4.427304] random: systemd-udevd: uninitialized urandom read (16 bytes read, 2 bits of entropy available)
+[    4.442927] random: systemd-udevd: uninitialized urandom read (16 bytes read, 2 bits of entropy available)
+[    4.445119] random: udevadm: uninitialized urandom read (16 bytes read, 2 bits of entropy available)
+[    4.455191] random: systemd-udevd: uninitialized urandom read (16 bytes read, 2 bits of entropy available)
+[    4.470361] random: udevadm: uninitialized urandom read (16 bytes read, 2 bits of entropy available)
+[    4.473229] random: udevadm: uninitialized urandom read (16 bytes read, 2 bits of entropy available)
+[    4.482163] random: systemd-udevd: uninitialized urandom read (16 bytes read, 2 bits of entropy available)
+[    4.584220] e1000: Intel(R) PRO/1000 Network Driver - version 7.3.21-k8-NAPI
+[    4.688969] e1000: Copyright (c) 1999-2006 Intel Corporation.
+[    4.700629] Fusion MPT base driver 3.04.20
+[    4.704710] Copyright (c) 1999-2008 LSI Corporation
+[    4.759681] AVX version of gcm_enc/dec engaged.
+[    4.816402] AES CTR mode by8 optimization enabled
+[    4.857738] Fusion MPT SPI Host driver 3.04.20
+[    4.894173] mptbase: ioc0: Initiating bringup
+[    4.935610] input: ImExPS/2 Generic Explorer Mouse as /devices/platform/i8042/serio1/input/input4
+[    5.046602] ioc0: LSI53C1030 A0: Capabilities={Initiator}
+[    5.246451] scsi host2: ioc0: LSI53C1030 A0, FwRev=00000000h, Ports=1, MaxQ=256, IRQ=20
+[    5.507547] scsi 2:0:0:0: Direct-Access     VBOX     HARDDISK         1.0  PQ: 0 ANSI: 5
+[    5.596163] scsi target2:0:0: Beginning Domain Validation
+[    5.597610] scsi target2:0:0: Domain Validation skipping write tests
+[    5.601121] scsi target2:0:0: Ending Domain Validation
+[    5.603650] scsi target2:0:0: asynchronous
+[    5.676389] scsi 2:0:1:0: Direct-Access     VBOX     HARDDISK         1.0  PQ: 0 ANSI: 5
+[    5.711756] scsi target2:0:1: Beginning Domain Validation
+[    5.773532] scsi target2:0:1: Domain Validation skipping write tests
+[    5.785496] scsi target2:0:1: Ending Domain Validation
+[    5.829354] scsi target2:0:1: asynchronous
+[    5.997257] sd 2:0:0:0: Attached scsi generic sg0 type 0
+[    6.027056] sd 2:0:0:0: [sda] 20971520 512-byte logical blocks: (10.7 GB/10.0 GiB)
+[    6.035287] sd 2:0:0:0: [sda] Write Protect is off
+[    6.038133] sd 2:0:0:0: [sda] Incomplete mode parameter data
+[    6.042389] sd 2:0:0:0: [sda] Assuming drive cache: write through
+[    6.083697] sd 2:0:1:0: [sdb] 20480 512-byte logical blocks: (10.5 MB/10.0 MiB)
+[    6.109080] sd 2:0:1:0: Attached scsi generic sg1 type 0
+[    6.117987] e1000 0000:00:03.0 eth0: (PCI:33MHz:32-bit) 02:0a:1a:84:64:1f
+[    6.170822] e1000 0000:00:03.0 eth0: Intel(R) PRO/1000 Network Connection
+[    6.254058] sd 2:0:1:0: [sdb] Write Protect is off
+[    6.293243] e1000 0000:00:03.0 enp0s3: renamed from eth0
+[    6.305093] sd 2:0:1:0: [sdb] Incomplete mode parameter data
+[    6.323069] sd 2:0:1:0: [sdb] Assuming drive cache: write through
+[    6.325292]  sda: sda1
+[    6.326360] sd 2:0:0:0: [sda] Attached SCSI disk
+[    6.342542] sd 2:0:1:0: [sdb] Attached SCSI disk
+[    7.849858] floppy0: no floppy controllers found
+Begin: Loading e[    9.204901] md: linear personality registered for level -1
+ssential drivers[    9.326111] md: multipath personality registered for level -4
+ ... [    9.391342] md: raid0 personality registered for level 0
+[    9.400346] md: raid1 personality registered for level 1
+[    9.569946] raid6: sse2x1   gen()  8929 MB/s
+[    9.661742] raid6: sse2x1   xor()  7537 MB/s
+[    9.738410] raid6: sse2x2   gen() 11888 MB/s
+[    9.841905] raid6: sse2x2   xor()  8105 MB/s
+[    9.969864] raid6: sse2x4   gen() 13864 MB/s
+[   10.061649] raid6: sse2x4   xor()  7452 MB/s
+[   10.071282] raid6: using algorithm sse2x4 gen() 13864 MB/s
+[   10.080599] raid6: .... xor() 7452 MB/s, rmw enabled
+[   10.083561] raid6: using ssse3x2 recovery algorithm
+[   10.091035] xor: automatically using best checksumming function:
+[   10.133738]    avx       : 20041.000 MB/sec
+[   10.143240] async_tx: api initialized (async)
+[   10.156309] md: raid6 personality registered for level 6
+[   10.172071] md: raid5 personality registered for level 5
+[   10.172908] md: raid4 personality registered for level 4
+[   10.184860] md: raid10 personality registered for level 10
 done.
-Begin: Running /scripts/init-premount ... done.
+Begin: Running /scripts/init-p[   10.228680] Btrfs loaded
+remount ... done.
 Begin: Mounting root file system ... Begin: Running /scripts/local-top ... done.
-Begin: Running /scripts/local-premount ... [   10.766143] Btrfs loaded
-Scanning for Btrfs filesystems
+Begin: Running /scripts/local-premount ... Scanning for Btrfs filesystems
 done.
-Warning: fsck not present, so skipping root file system
-[   10.865714] EXT4-fs (sda1): mounted filesystem with ordered data mode. Opts: (null)
+Warning: fsck not present, so skipping ro[   10.466800] EXT4-fs (sda1): mounted filesystem with ordered data mode. Opts: (null)
+ot file system
 done.
 Begin: Running /scripts/local-bottom ... done.
 Begin: Running /scripts/init-bottom ... done.
-[   11.834665] systemd[1]: systemd 229 running in system mode. (+PAM +AUDIT +SELINUX +IMA +APPARMOR +SMACK +SYSVINIT +UTMP +LIBCRYPTSETUP +GCRYPT +GNUTLS +ACL +XZ -LZ4 +SECCOMP +BLKID +ELFUTILS +KMOD -IDN)
-[   11.949185] systemd[1]: Detected virtualization oracle.
-[   11.950029] systemd[1]: Detected architecture x86-64.
-[   12.010823] random: nonblocking pool is initialized
+[   11.226106] random: nonblocking pool is initialized
+[   11.721656] systemd[1]: systemd 229 running in system mode. (+PAM +AUDIT +SELINUX +IMA +APPARMOR +SMACK +SYSVINIT +UTMP +LIBCRYPTSETUP +GCRYPT +GNUTLS +ACL +XZ -LZ4 +SECCOMP +BLKID +ELFUTILS +KMOD -IDN)
+[   11.899319] systemd[1]: Detected virtualization oracle.
+[   11.962516] systemd[1]: Detected architecture x86-64.
 
 Welcome to Ubuntu 16.04.3 LTS!
 
-[   12.337008] systemd[1]: Set hostname to <ubuntu>.
-[   12.436322] systemd[1]: Initializing machine ID from random generator.
-[   12.479953] systemd[1]: Installed transient /etc/machine-id file.
-[   14.042559] systemd[1]: Created slice User and Session Slice.
-[  OK  ] Created slice User and Session Slice.
-[   14.118524] systemd[1]: Listening on udev Kernel Socket.
-[  OK  ] Listening on udev Kernel Socket.
-[   14.259175] systemd[1]: Listening on Device-mapper event daemon FIFOs.
+[   12.091866] systemd[1]: Set hostname to <ubuntu>.
+[   12.153482] systemd[1]: Initializing machine ID from random generator.
+[   12.185309] systemd[1]: Installed transient /etc/machine-id file.
+[   13.503456] systemd[1]: Listening on Device-mapper event daemon FIFOs.
 [  OK  ] Listening on Device-mapper event daemon FIFOs.
-[   14.347053] systemd[1]: Created slice System Slice.
+[   13.529950] systemd[1]: Started Forward Password Requests to Wall Directory Watch.
+[  OK  ] Started Forward Password Requests to Wall Directory Watch.
+[   13.614555] systemd[1]: Created slice System Slice.
 [  OK  ] Created slice System Slice.
-[   14.455007] systemd[1]: Reached target Slices.
-[  OK  ] Reached target Slices.
-[   14.524421] systemd[1]: Listening on Journal Socket.
+[   13.673869] systemd[1]: Listening on Journal Socket (/dev/log).
+[  OK  ] Listening on Journal Socket (/dev/log).
+[   13.822407] systemd[1]: Started Trigger resolvconf update for networkd DNS.
+[  OK  ] Started Trigger resolvconf update for networkd DNS.
+[   13.974332] systemd[1]: Listening on Journal Audit Socket.
+[  OK  ] Listening on Journal Audit Socket.
+[   14.077952] systemd[1]: Reached target Swap.
+[  OK  ] Reached target Swap.
+[   14.154910] systemd[1]: Listening on Journal Socket.
 [  OK  ] Listening on Journal Socket.
-[   14.575708] systemd[1]: Starting Create list of required static device nodes for the current kernel...
-         Starting Create list of required st... nodes for the current kernel...
-[   14.617453] systemd[1]: Starting Set console keymap...
-         Starting Set console keymap...
-[   14.820448] systemd[1]: Starting Load Kernel Modules...
-         Starting Load Kernel Modules...
-[   14.960878] systemd[1]: Mounting Huge Pages File System...
+[   14.249620] systemd[1]: Mounting Huge Pages File System...
          Mounting Huge Pages File System...
-[   15.052620] systemd[1]: Starting Uncomplicated firewall...
-         Starting Uncomplicated firewall...
-[   15.134220] systemd[1]: Listening on udev Control Socket.
-[  OK  ] Listening on udev Control Socket.
-[   15.302431] systemd[1]: Listening on LVM2 metadata daemon socket.
-[  OK  ] Listening on LVM2 metadata daemon socket.
-[   15.427322] systemd[1]: Starting Remount Root and Kernel File Systems...
-[   15.510568] Loading iSCSI transport class v2.0-870.
-         Starting Remount Root and Kernel File Systems...
-[   15.631545] systemd[1]: Created slice system-serial\x2dgetty.slice.
-[  OK  ] Created slice system-serial\x2dgetty.slice.
-[   15.676991] systemd[1]: Listening on Syslog Socket.
-[  OK  ] Listening on Syslog Socket.
-[   15.703157] systemd[1]: Mounting POSIX Message Queue File System...
-[   15.714120] EXT4-fs (sda1): re-mounted. Opts: (null)
-         [   15.727986] iscsi: registered transport (tcp)
-Mounting POSIX Message Queue File System...
-[   15.782785] systemd[1]: Mounting Debug File System...
+[   14.353434] systemd[1]: Mounting Debug File System...
          Mounting Debug File System...
-[   15.818910] systemd[1]: Reached target Encrypted Volumes.
-[  OK  ] Reached targe[   15.879564] iscsi: registered transport (iser)
-t Encrypted Volumes.
-[   16.010932] systemd[1]: Listening on Journal Audit Socket.
-[  OK  ] Listening on Journal Audit Socket.
-[   16.086878] systemd[1]: Set up automount Arbitrary Executable File Formats File System Automount Point.
+[   14.439859] systemd[1]: Mounting POSIX Message Queue File System...
+         Mounting POSIX Message Queue File System...
+[   14.517758] systemd[1]: Starting Remount Root and Kernel File Systems...
+         Starting Remount Root and Kernel File Systems...
+[   14.577090] systemd[1]: Created slice system-serial\x2dgetty.slice.
+[[   14.583214] EXT4-fs (sda1): re-mounted. Opts: (null)
+  OK  ] Created slice system-serial\x2dgetty.slice.
+[   14.614228] systemd[1]: Reached target Encrypted Volumes.
+[  OK  ] Reached target Encrypted Volumes.
+[   14.658211] systemd[1]: Set up automount Arbitrary Executable File Formats File System Automount Point.
 [  OK  ] Set up automount Arbitrary Executab...ats File System Automount Point.
-[   16.342330] systemd[1]: Started Forward Password Requests to Wall Directory Watch.
-[  OK  ] Started Forward Password Requests to Wall Directory Watch.
-[   16.490513] systemd[1]: Reached target User and Group Name Lookups.
-[  OK  ] Reached target User and Group Name Lookups.
-[   16.618972] systemd[1]: Started Trigger resolvconf update for networkd DNS.
-[  OK  ] Started Trigger resolvconf update for networkd DNS.
-[   16.674018] systemd[1]: Reached target Swap.
-[  OK  ] Reached target Swap.
-[   16.741494] systemd[1]: Starting Nameserver information manager...
+[   14.759259] systemd[1]: Starting Nameserver information manager...
          Starting Nameserver information manager...
-[   16.804642] systemd[1]: Starting Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling...
-         Starting Monitoring of LVM2 mirrors... dmeventd or progress polling...
-[   16.838648] systemd[1]: Listening on LVM2 poll daemon socket.
-[  OK  ] Listening on LVM2 poll daemon socket.
-[   16.878340] systemd[1]: Listening on Journal Socket (/dev/log).
-[  OK  ] Listening on Journal Socket (/dev/log).
-[   16.930947] systemd[1]: Starting Journal Service...
-         Starting Journal Service...
-[   16.986021] systemd[1]: Listening on /dev/initctl Compatibility Named Pipe.
+[   14.872773] systemd[1]: Listening on udev Control Socket.
+[  OK  ] Listening on udev Control Socket.
+[   14.988643] systemd[1]: Starting Load Kernel Modules...
+         Starting Load Kernel Modules...
+[   15.037865] systemd[1]: Starting Set console keymap...
+         Starting Set console keymap...
+[   15.163423] systemd[1]: Listening on udev Kernel Socket.
+[  OK  ] Listening on udev Kernel Sock[   15.295444] Loading iSCSI transport class v2.0-870.
+et.
+[   15.386156] systemd[1]: Starting Uncomplicated firewall...
+         Starting Uncomplicated firewall...
+[   15.463024] systemd[1]: Listening on /dev/initctl Compatibility Named Pipe.
+[   15.538173] iscsi: registered transport (tcp)
 [  OK  ] Listening on /dev/initctl Compatibility Named Pipe.
-[   17.132177] systemd[1]: Mounted Debug File System.
+[   15.630217] systemd[1]: Listening on LVM2 poll daemon socket.
+[  OK  ] Listening on [   15.671046] iscsi: registered transport (iser)
+LVM2 poll daemon socket.
+[   15.698146] systemd[1]: Listening on Syslog Socket.
+[  OK  ] Listening on Syslog Socket.
+[   15.718352] systemd[1]: Starting Journal Service...
+         Starting Journal Service...
+[   15.815684] systemd[1]: Starting Create list of required static device nodes for the current kernel...
+         Starting Create list of required st... nodes for the current kernel...
+[   15.995151] systemd[1]: Listening on LVM2 metadata daemon socket.
+[  OK  ] Listening on LVM2 metadata daemon socket.
+[   16.034599] systemd[1]: Starting Monitoring of LVM2 mirrors, snapshots etc. using dmeventd or progress polling...
+         Starting Monitoring of LVM2 mirrors... dmeventd or progress polling...
+[   16.082137] systemd[1]: Created slice User and Session Slice.
+[  OK  ] Created slice User and Session Slice.
+[   16.118239] systemd[1]: Reached target Slices.
+[  OK  ] Reached target Slices.
+[   16.166091] systemd[1]: Reached target User and Group Name Lookups.
+[  OK  ] Reached target User and Group Name Lookups.
+[   16.299022] systemd[1]: Mounted Debug File System.
 [  OK  ] Mounted Debug File System.
-[   17.215012] systemd[1]: Mounted Huge Pages File System.
+[   16.427265] systemd[1]: Mounted Huge Pages File System.
 [  OK  ] Mounted Huge Pages File System.
-[   17.318815] systemd[1]: Mounted POSIX Message Queue File System.
+[   16.530661] systemd[1]: Mounted POSIX Message Queue File System.
 [  OK  ] Mounted POSIX Message Queue File System.
-[   17.471996] systemd[1]: Started Create list of required static device nodes for the current kernel.
-[  OK  ] Started Create list of required sta...ce nodes for the current kernel.
-[   17.627179] systemd[1]: Started Set console keymap.
-[  OK  ] Started Set console keymap.
-[   17.651319] systemd[1]: Started Load Kernel Modules.
+[   16.634561] systemd[1]: Started Journal Service.
+[  OK  ] Started Journal Service.
+[  OK  ] Started Remount Root and Kernel File Systems.
 [  OK  ] Started Load Kernel Modules.
-[   17.768572] systemd[1]: Started Uncomplicated firewall.
+[  OK  ] Started Set console keymap.
 [  OK  ] Started Uncomplicated firewall.
-[   17.852533] systemd[1]: Started Remount Root and Kernel File Systems.
-[  OK  ] Started Remount Root and Kernel File Systems.
-[   17.931246] systemd[1]: Started Journal Service.
-[  OK  ] Started Journal Service.
+[  OK  ] Started Create list of required sta...ce nodes for the current kernel.
+[  OK  ] Started Nameserver information manager.
 [  OK  ] Started LVM2 metadata daemon.
+         Starting Create Static Device Nodes in /dev...
+         Mounting FUSE Control File System...
+         Starting Apply Kernel Variables...
+         Starting Load/Save Random Seed...
          Starting Initial cloud-init job (pre-networking)...
          Starting udev Coldplug all Devices...
          Starting Flush Journal to Persistent Storage...
-         Starting Load/Save Random Seed...
-         Starting Apply Kernel Variables...
-         Mounting FUSE Control File System...
-         Starting Create Static Device Nodes in /dev...
 [  OK  ] Mounted FUSE Control File System.
-[  OK  ] Started Nameserver information manager.
 [  OK  ] Started Load/Save Random Seed.
+[  OK  ] Started Apply Kernel Variables.
 [  OK  ] Started udev Coldplug all Devices.
-[   18.385508] systemd-journald[411]: Received request to flush runtime journal from PID 1
 [  OK  ] Started Monitoring of LVM2 mirrors,...ng dmeventd or progress polling.
-[  OK  ] Started Flush Journal to Persistent Storage.
-[  OK  ] Started Apply Kernel Variables.
 [  OK  ] Started Create Static Device Nodes in /dev.
          Starting udev Kernel Device Manager...
+[  OK  ] Started Flush Journal to Persistent Storage.
 [  OK  ] Started udev Kernel Device Manager.
 [  OK  ] Started Dispatch Password Requests to Console Directory Watch.
 [  OK  ] Reached target Local File Systems (Pre).
 [  OK  ] Reached target Local File Systems.
-         Starting Set console font and keymap...
-         Starting Create Volatile Files and Directories...
          Starting Commit a transient machine-id on disk...
          Starting LSB: AppArmor initialization...
+         Starting Set console font and keymap...
          Starting Tell Plymouth To Write Out Runtime Data...
-[  OK  ] Started Create Volatile Files and Directories.
+         Starting Create Volatile Files and Directories...
 [  OK  ] Started Commit a transient machine-id on disk.
 [  OK  ] Started Tell Plymouth To Write Out Runtime Data.
 [  OK  ] Found device /dev/ttyS0.
+[  OK  ] Started Create Volatile Files and Directories.
 [  OK  ] Reached target System Time Synchronized.
          Starting Update UTMP about System Boot/Shutdown...
 [  OK  ] Started Update UTMP about System Boot/Shutdown.
@@ -510,177 +501,177 @@ t Encrypted Volumes.
 [  OK  ] Created slice system-getty.slice.
 [  OK  ] Listening on Load/Save RF Kill Switch Status /dev/rfkill Watch.
 [  OK  ] Started LSB: AppArmor initialization.
-[   25.589709] cloud-init[421]: Cloud-init v. 0.7.9 running 'init-local' at Fri, 05 Jan 2018 16:14:17 +0000. Up 24.29 seconds.
+[   23.979073] cloud-init[448]: Cloud-init v. 0.7.9 running 'init-local' at Fri, 12 Jan 2018 16:25:28 +0000. Up 23.12 seconds.
 [  OK  ] Started Initial cloud-init job (pre-networking).
 [  OK  ] Reached target Network (Pre).
          Starting Raise network interfaces...
 [  OK  ] Started Raise network interfaces.
 [  OK  ] Reached target Network.
          Starting Initial cloud-init job (metadata service crawler)...
-[   28.882271] cloud-init[941]: Cloud-init v. 0.7.9 running 'init' at Fri, 05 Jan 2018 16:14:20 +0000. Up 26.38 seconds.
-[   28.908681] cloud-init[941]: ci-info: +++++++++++++++++++++++++++++++++++++Net device info+++++++++++++++++++++++++++++++++++++
-[   28.918584] cloud-init[941]: ci-info: +--------+------+---------------------------+---------------+-------+-------------------+
-[   28.928871] cloud-init[941]: ci-info: | Device |  Up  |          Address          |      Mask     | Scope |     Hw-Address    |
-[   28.932255] cloud-init[941]: ci-info: +--------+------+---------------------------+---------------+-------+-------------------+
-[   28.944354] cloud-init[941]: ci-info: | enp0s3 | True |         10.0.2.15         | 255.255.255.0 |   .   | 02:0a:1a:84:64:1f |
-[   28.963718] [  OK  ] Started Initial cloud-init job (metadata service crawler).
+[   27.757580] cloud-init[957]: Cloud-init v. 0.7.9 running 'init' at Fri, 12 Jan 2018 16:25:30 +0000. Up 25.03 seconds.
+[   27.790452] cloud-init[957]: ci-info: +++++++++++++++++++++++++++++++++++++Net device info+++++++++++++++++++++++++++++++++++++
+[   27.817874] cloud-init[957]: ci-info: +--------+------+---------------------------+---------------+-------+-------------------+
+[   27.836487] cloud-init[957]: ci-info: | Device |  Up  |          Address          |      Mask     | Scope |     Hw-Address    |
+[  OK  ] Started Initial cloud-init job (metadata service crawler).
+[   27.858377] cloud-init[957]: ci-info: +--------+------+---------------------------+---------------+-------+-------------------+
+[   27.926009] cloud-init[957]: ci-info: |   lo   | True |         127.0.0.1         |   255.0.0.0   |   .   |         .         |
+[   27.947398] cloud-init[957]: ci-info: |   lo   | True |          ::1/128          |       .       |  host |         .         |
 [  OK  ] Reached target Cloud-config availability.
-[  OK  ] Reached target System Initialization.
-[  OK  ] Listening on UUID daemon activation socket.
-         Starting Socket activation for snappy daemon.
+[  OK  ] Reached target Network is Online.
+[   28.006465] cloud-init[957]: ci-info: | enp0s3 | True |         10.0.2.15         | 255.255.255.0 |   .   | 02:0a:1a:84:64:1f |
+         Starting iSCSI initiator daemon (iscsid)...
+[[   28.104100] cloud-init[957]: ci-info: | enp0s3 | True | fe80::a:1aff:fe84:641f/64 |       .       |  link | 02:0a:1a:84:64:1f |
+[   28.108354] cloud-init[957]: ci-info: +--------+------+---------------------------+---------------+-------+-------------------+
+[   28.108388] cloud-init[957]: ci-info: +++++++++++++++++++++++++++Route IPv4 info++++++++++++++++++++++++++++
+[   28.108415] cloud-init[957]: ci-info: +-------+-------------+----------+---------------+-----------+-------+
+[   28.108456] cloud-init[957]: ci-info: | Route | Destination | Gateway  |    Genmask    | Interface | Flags |
+[   28.108486] cloud-init[957]: ci-info: +-------+-------------+----------+---------------+-----------+-------+
+[   28.108517] cloud-init[957]: ci-info: |   0   |   0.0.0.0   | 10.0.2.2 |    0.0.0.0    |   enp0s3  |   UG  |
+[   28.108543] cloud-init[957]: ci-info: |   1   |   10.0.2.0  | 0.0.0.0  | 255.255.255.0 |   enp0s3  |   U   |
+[   28.108568] cloud-init[957]: ci-info: +-------+-------------+----------+---------------+-----------+-------+
+[   28.108742] cloud-init[957]: Generating public/private rsa key pair.
+[   28.108775] cloud-init[957]: Your identification has been saved in /etc/ssh/ssh_host_rsa_key.
+[   28.108804] cloud-init[957]: Your public key has been saved in /etc/ssh/ssh_host_rsa_key.pub.
+[   28.108828] cloud-init[957]: The key fingerprint is:
+[   28.108853] cloud-init[957]: SHA256:V91RkFaDF9+XI2WH7hR1S4znUXSvnC8CpXUUoBdEMHw root@ubuntu-xenial
+[   28.108878] cloud-init[957]: The key's randomart image is:
+[   28.108902] cloud-init[957]: +---[RSA 2048]----+
+[   28.108929] cloud-init[957]: |         .o+=.B/&|
+[   28.108973] cloud-init[957]: |          .oE*O*%|
+[   28.109008] cloud-init[957]: |          ..=+*=B|
+[   28.109034] cloud-init[957]: |           * o++o|
+[   28.109059] cloud-init[957]: |        S +  o+  |
+[   28.109084] cloud-init[957]: |         . .  .. |
+[   28.109108] cloud-init[957]: |            . . .|
+[   28.109148] cloud-init[957]: |             . . |
+[   28.109189] cloud-init[957]: |                 |
+[   28.109235] cloud-init[957]: +----[SHA256]-----+
+[   28.109261] cloud-init[957]: Generating public/private dsa key pair.
+[   28.109285] cloud-init[957]: Your identification has been saved in /etc/ssh/ssh_host_dsa_key.
+[   28.109309] cloud-init[957]: Your public key has been saved in /etc/ssh/ssh_host_dsa_key.pub.
+[   28.109334] cloud-init[957]: The key fingerprint is:
+[   28.109358] cloud-init[957]: SHA256:Gy3BhAUZK67IVjGWoEJ9ZjL1iYQfFjqCZjJu7cuY5II root@ubuntu-xenial
+[   28.109384] cloud-init[957]: The key's randomart image is:
+[   28.109410] cloud-init[957]: +---[DSA 1024]----+
+[   28.109434] cloud-init[957]: | o. o++*o        |
+[   28.109459] cloud-init[957]: |+ .=+*=+.        |
+[   28.109483] cloud-init[957]: |*+ BX.ooo        |
+[   28.109506] cloud-init[957]: |*.+.+o   o       |
+[   28.109531] cloud-init[957]: | o o.   S .      |
+[   28.109554] cloud-init[957]: |o.o.     +       |
+[   28.109578] cloud-init[957]: |o+..    .        |
+[   28.109603] cloud-init[957]: |E + .            |
+[   28.109627] cloud-init[957]: |.+ o             |
+[   28.109651] cloud-init[957]: +----[SHA256]-----+
+[   28.109675] cloud-init[957]: Generating public/private ecdsa key pair.
+[   28.109699] cloud-init[957]: Your identification has been saved in /etc/ssh/ssh_host_ecdsa_key.
+[   28.109724] cloud-init[957]: Your public key has been saved in /etc/ssh/ssh_host_ecdsa_key.pub.
+[   28.109748] cloud-init[957]: The key fingerprint is:
+[   28.109772] cloud-init[957]: SHA256:LHPuX+xSX6TWlMG9eA2eVMHUlpNFqBi3/rMRUQSgxSo root@ubuntu-xenial
+[   28.109796] cloud-init[957]: The key's randomart image is:
+[   28.109819] cloud-init[957]: +---[ECDSA 256]---+
+[   28.109843] cloud-init[957]: |           .o.=O&|
+[   28.109869] cloud-init[957]: |          .oo +O+|
+[   28.109893] cloud-init[957]: |          .= =o+*|
+[   28.109918] cloud-init[957]: |       .E o o.o=+|
+[   28.109941] cloud-init[957]: |      o S. .  o= |
+[   28.109965] cloud-init[957]: |       =   .o o.o|
+[   28.109989] cloud-init[957]: |        .  .o+.. |
+[   29.113915] cloud-init[957]: |       .  .o  +. |
+[   29.113955] cloud-init[957]: |        ..... .o |
+[   29.113982] cloud-init[957]: +----[SHA256]-----+
+[   29.114006] cloud-init[957]: Generating public/private ed25519 key pair.
+[   29.114030] cloud-init[957]: Your identification has been saved in /etc/ssh/ssh_host_ed25519_key.
+[   29.114055] cloud-init[957]: Your public key has been saved in /etc/ssh/ssh_host_ed25519_key.pub.
+[   29.114086] cloud-init[957]: The key fingerprint is:
+[   29.114110] cloud-init[957]: SHA256:LyvReAP8OAsny5vYKNvrFLrJ1WLKnq2rNa7pXvOGAzE root@ubuntu-xenial
+[   29.114134] cloud-init[957]: The key's randomart image is:
+[   29.114160] cloud-init[957]: +--[ED25519 256]--+
+[   29.114185] cloud-init[957]: |                 |
+[   29.114209] cloud-init[957]: |                 |
+[   29.114232] cloud-init[957]: |     .           |
+[   29.114256] cloud-init[957]: | E    o          |
+[   29.114281] cloud-init[957]: |  +    *S        |
+[   29.114328] cloud-init[957]: | o .+ * =.       |
+[   29.114356] cloud-init[957]: |. =B.* =...      |
+[   29.114381] cloud-init[957]: |+OBB*oo  o       |
+[   29.114589] cloud-init[957]: |%/Xo*o ..        |
+[   29.114620] cloud-init[957]: +----[SHA256]-----+
+  OK  ] Reached target System Initialization.
 [  OK  ] Started Daily Cleanup of Temporary Directories.
-cloud-init[941]: ci-info: | enp0s3 | True | fe80::a:1aff:fe84:641f/64 |       .       |  link | 02:0a:1a:84:64:1f |
-[   29.080946] cloud-init[941]: ci-info: |   lo   | True |         127.0.0.1         |   255.0.0.0   |   .   |         .         |
-[   29.109025] cloud-init[941]: ci-info: |   lo   | True |          ::1/128          |       .       |  host |         .         |
-[   29.144092] cloud-init[941]: ci-info: +--------+------+---------------------------+---------------+-------+-------------------+
-[   29.168152] cloud-init[941]: ci-info: +++++++++++++++++++++++++++Route IPv4 info++++++++++++++++++++++++++++
-[   29.168416] cloud-init[941]: ci-info: +-------+-------------+----------+---------------+-----------+-------+
-[   29.168448] cloud-init[941]: ci-info: | Route | Destination | Gateway  |    Genmask    | Interface | Flags |
-[   29.168485] cloud-init[941]: ci-info: +-------+-------------+----------+---------------+-----------+-------+
-[   29.168513] cloud-init[941]: ci-info: |   0   |   0.0.0.0   | 10.0.2.2 |    0.0.0.0    |   enp0s3  |   UG  |
-[   29.168539] cloud-init[941]: ci-info: |   1   |   10.0.2.0  | 0.0.0.0  | 255.255.255.0 |   enp0s3  |   U   |
-[   29.168565] cloud-init[941]: ci-info: +-------+-------------+----------+---------------+-----------+-------+
-[   29.168630] cloud-init[941]: Generating public/private rsa key pair.
-[   29.168662] cloud-init[941]: Your identification has been saved in /etc/ssh/ssh_host_rsa_key.
-[   29.168689] cloud-init[941]: Your public key has been saved in /etc/ssh/ssh_host_rsa_key.pub.
-[   29.168716] cloud-init[941]: The key fingerprint is:
-[   29.168742] cloud-init[941]: SHA256:1r0tdS1QR4ImshlrFZIvXDQqY0KWjyjXczKjivNyCRY root@ubuntu-xenial
-[   29.168768] cloud-init[941]: The key's randomart image is:
-[   29.168793] cloud-init[941]: +---[RSA 2048]----+
-[   29.168818] cloud-init[941]: |    o.  .o+. .o.o|
-[   29.168843] cloud-init[941]: |   o.   +o+.o. o |
-[   29.168868] cloud-init[941]: |   o.o+..X o.    |
-[   29.168892] cloud-init[941]: |.Eo BoooB... .  .|
-[   29.168916] cloud-init[941]: | o.. * .S.. . o o|
-[   29.168947] cloud-init[941]: |...    .     + o |
-[   29.168971] cloud-init[941]: |oo .        o .  |
-[   29.168996] cloud-init[941]: |= o          .   |
-[   29.169020] cloud-init[941]: | =.              |
-[   29.169045] cloud-init[941]: +----[SHA256]-----+
-[   29.169069] cloud-init[941]: Generating public/private dsa key pair.
-[   29.169132] cloud-init[941]: Your identification has been saved in /etc/ssh/ssh_host_dsa_key.
-[   29.169162] cloud-init[941]: Your public key has been saved in /etc/ssh/ssh_host_dsa_key.pub.
-[   29.169188] cloud-init[941]: The key fingerprint is:
-[   29.169212] cloud-init[941]: SHA256:WRvS7F0JtGJ0rwZqqCMqyx8FcAWQi/AKQ8o7jIA7S3A root@ubuntu-xenial
-[   29.169238] cloud-init[941]: The key's randomart image is:
-[   29.169265] cloud-init[941]: +---[DSA 1024]----+
-[   29.169290] cloud-init[941]: | oooo.    ..+    |
-[   29.169315] cloud-init[941]: |.oo      + . + . |
-[   29.169340] cloud-init[941]: |B...    . O . +  |
-[   29.169364] cloud-init[941]: |OoE .  . B * o   |
-[   29.169389] cloud-init[941]: |*=.  .. S o +    |
-[   29.169414] cloud-init[941]: |==  .. .   .     |
-[   29.169439] cloud-init[941]: |.oo.o            |
-[   29.169463] cloud-init[941]: |+. ...           |
-[   29.169489] cloud-init[941]: |+o..             |
-[   29.169514] cloud-init[941]: +----[SHA256]-----+
-[   29.169539] cloud-init[941]: Generating public/private ecdsa key pair.
-[   29.169564] cloud-init[941]: Your identification has been saved in /etc/ssh/ssh_host_ecdsa_key.
-[   29.169589] cloud-init[941]: Your public key has been saved in /etc/ssh/ssh_host_ecdsa_key.pub.
-[   29.169614] cloud-init[941]: The key fingerprint is:
-[   29.169638] cloud-init[941]: SHA256:1nBvcAexaclqSG7J3p4OcuHXE+nUv+DKdu9azfAiHsY root@ubuntu-xenial
-[   29.169663] cloud-init[941]: The key's randomart image is:
-[   29.169688] cloud-init[941]: +---[ECDSA 256]---+
-[   29.169712] cloud-init[941]: |            o.   |
-[   29.169736] cloud-init[941]: |           . =   |
-[   29.169761] cloud-init[941]: |        o o B .  |
-[   29.169787] cloud-init[941]: |       + * * +   |
-[   29.169812] cloud-init[941]: |        S + * o  |
-[   29.169837] cloud-init[941]: |       = + * . =.|
-[   29.169862] cloud-init[941]: |      . = o E...=|
-[   29.169887] cloud-init[941]: |       o +.=.=o..|
-[   29.169911] cloud-init[941]: |         .=o+o++ |
-[   29.169935] cloud-init[941]: +----[SHA256]-----+
-[   29.169960] cloud-init[941]: Generating public/private ed25519 key pair.
-[   29.169984] [  OK  ] Started Timer to automatically refresh installed snaps.
-[  OK  ] Listening on ACPID Listen Socket.
-[  OK  ] Listening on D-Bus System Message Bus Socket.
-cloud-init[941]: Your identification has been saved in /etc/ssh/ssh_host_ed25519_key.
          Starting LXD - unix socket.
-[  OK  ] Started Timer to automatically fetch and run repair assertions.
-[   29.901211] cloud-init[941]: Your public key has been saved in /etc/ssh/ssh_host_ed25519_key.pub.
-[   29.961888] cloud-init[941]: The key fingerprint is:
-[  OK  [   29.980151] cloud-init[941]: SHA256:WevfbOxyoj5EQcQlZZgjb837ryf2xnCYO0X+hlTQCdo root@ubuntu-xenial
-[   29.980196] cloud-init[941]: The key's randomart image is:
-[   29.980226] cloud-init[941]: +--[ED25519 256]--+
-[   29.980253] cloud-init[941]: |         ++=+....|
-[   29.980280] cloud-init[941]: |        . *oo ...|
-[   29.980307] cloud-init[941]: |         o.* E . |
-[   29.980334] cloud-init[941]: |         o+.o   o|
-[   29.980360] cloud-init[941]: |        So.  . * |
-[   29.980385] cloud-init[941]: |         .. . = +|
-[   29.980411] cloud-init[941]: |         ..  + B.|
-[   29.980436] cloud-init[941]: |          ..oo@ *|
-[   29.980639] cloud-init[941]: |         .oo.B=Xo|
-[   29.980673] cloud-init[941]: +----[SHA256]-----+
-] Started ACPI Events Check.
+[  OK  ] Started ACPI Events Check.
 [  OK  ] Reached target Paths.
-[  OK  ] Reached target Network is Online.
-         Starting iSCSI initiator daemon (iscsid)...
+[  OK  ] Listening on ACPID Listen Socket.
 [  OK  ] Started Daily apt download activities.
 [  OK  ] Started Daily apt upgrade and clean activities.
+         Starting Socket activation for snappy daemon.
+[  OK  ] Started Timer to automatically refresh installed snaps.
+[  OK  ] Listening on UUID daemon activation socket.
+[  OK  ] Started Timer to automatically fetch and run repair assertions.
 [  OK  ] Reached target Timers.
-[  OK  ] Listening on Socket activation for snappy daemon.
+[  OK  ] Listening on D-Bus System Message Bus Socket.
 [  OK  ] Listening on LXD - unix socket.
+[  OK  ] Listening on Socket activation for snappy daemon.
+[  OK  ] Started iSCSI initiator daemon (iscsid).
+         Starting Login to default iSCSI targets...
 [  OK  ] Reached target Sockets.
 [  OK  ] Reached target Basic System.
 [  OK  ] Started ACPI event daemon.
+[  OK  ] Started D-Bus System Message Bus.
+         Starting Apply the settings specified in cloud-config...
+         Starting /etc/rc.local Compatibility...
+         Starting Snappy daemon...
+[  OK  ] Started Regular background program processing daemon.
+[  OK  ] Started Deferred execution scheduler.
+         Starting LSB: MD monitoring daemon...
          Starting Login Service...
+         Starting LXD - container startup/shutdown...
+         Starting System Logging Service...
          Starting Pollinate to seed the pseudo random number generator...
-[  OK  ] Started Deferred execution scheduler.
          Starting LSB: Record successful boot for GRUB...
-[  OK  ] Started Unattended Upgrades Shutdown.
-         Starting /etc/rc.local Compatibility...
-         Starting Accounts Service...
 [  OK  ] Started FUSE filesystem for LXC.
-[  OK  ] Started D-Bus System Message Bus.
-         Starting System Logging Service...
-         Starting LSB: MD monitoring daemon...
-         Starting Snappy daemon...
-         Starting Apply the settings specified in cloud-config...
-         Starting LXD - container startup/shutdown...
-[  OK  ] Started Regular background program processing daemon.
+         Starting Accounts Service...
+[  OK  ] Started Unattended Upgrades Shutdown.
 [  OK  ] Started /etc/rc.local Compatibility.
-[  OK  ] Started Login Service.
-[  OK  ] Started iSCSI initiator daemon (iscsid).
-         Starting Login to default iSCSI targets...
-         Starting Authenticate and Authorize Users to Run Privileged Tasks...
-[  OK  ] Started LSB: Record successful boot for GRUB.
 [  OK  ] Started LSB: MD monitoring daemon.
-[  OK  ] Started System Logging Service.
-[  OK  ] Started Authenticate and Authorize Users to Run Privileged Tasks.
-[  OK  ] Started Accounts Service.
 [  OK  ] Started Login to default iSCSI targets.
 [  OK  ] Reached target Remote File Systems (Pre).
 [  OK  ] Reached target Remote File Systems.
-         Starting LSB: daemon to balance interrupts for SMP systems...
          Starting LSB: automatic crash report generation...
-         Starting Permit User Sessions...
          Starting LSB: VirtualBox Linux Additions...
+         Starting Permit User Sessions...
          Starting LSB: Set the CPU Frequency Scaling governor to "ondemand"...
+         Starting LSB: daemon to balance interrupts for SMP systems...
+[  OK  ] Started Login Service.
+[  OK  ] Started System Logging Service.
+[  OK  ] Started LSB: automatic crash report generation.
 [  OK  ] Started Permit User Sessions.
-[  OK  ] Started LSB: Set the CPU Frequency Scaling governor to "ondemand".
          Starting Terminate Plymouth Boot Screen...
          Starting Hold until boot process finishes up...
 [  OK  ] Started Terminate Plymouth Boot Screen.
 [  OK  ] Started Hold until boot process finishes up.
-         Starting Set console scheme...
 [  OK  ] Started Getty on tty1.
+         Starting Set console scheme...
 [  OK  ] Started Serial Getty on ttyS0.
 [  OK  ] Reached target Login Prompts.
-[  OK  ] Started LSB: automatic crash report generation.
+[  OK  ] Started LSB: Set the CPU Frequency Scaling governor to "ondemand".
 [  OK  ] Started LSB: daemon to balance interrupts for SMP systems.
-[   32.970545] cloud-init[1120]: Generating locales (this might take a while)...
+[  OK  ] Started LSB: Record successful boot for GRUB.
 [  OK  ] Started Set console scheme.
+[   31.754838] cloud-init[1067]: Generating locales (this might take a while)...
+         Starting Authenticate and Authorize Users to Run Privileged Tasks...
 [  OK  ] Started LSB: VirtualBox Linux Additions.
+[  OK  ] Started Authenticate and Authorize Users to Run Privileged Tasks.
+[  OK  ] Started Accounts Service.
 [  OK  ] Started Pollinate to seed the pseudo random number generator.
          Starting OpenBSD Secure Shell server...
-[  OK  ] Started OpenBSD Secure Shell server.
 [  OK  ] Started Snappy daemon.
          Starting Auto import assertions from block devices...
 [  OK  ] Started Auto import assertions from block devices.
-[   35.773883] cloud-init[1120]:   en_US.UTF-8... done
-[   35.792849] cloud-init[1120]: Generation complete.
+[  OK  ] Started OpenBSD Secure Shell server.
+[   34.720983] cloud-init[1067]:   en_US.UTF-8... done
+[   34.742144] cloud-init[1067]: Generation complete.
 [  OK  ] Started LXD - container startup/shutdown.
 [  OK  ] Reached target Multi-User System.
 [  OK  ] Reached target Graphical Interface.
@@ -690,26 +681,25 @@ cloud-init[941]: Your identification has been saved in /etc/ssh/ssh_host_ed25519
 [  OK  ] Stopped OpenBSD Secure Shell server.
          Starting OpenBSD Secure Shell server...
 [  OK  ] Started OpenBSD Secure Shell server.
-[   37.934670] cloud-init[1120]: Cloud-init v. 0.7.9 running 'modules:config' at Fri, 05 Jan 2018 16:14:25 +0000. Up 31.82 seconds.
-ci-info: no authorized ssh keys fingerprints found for user ubuntu.
-<14>Jan  5 16:14:32 ec2: 
-<14>Jan  5 16:14:32 ec2: #############################################################
-<14>Jan  5 16:14:32 ec2: -----BEGIN SSH HOST KEY FINGERPRINTS-----
-<14>Jan  5 16:14:32 ec2: 1024 SHA256:WRvS7F0JtGJ0rwZqqCMqyx8FcAWQi/AKQ8o7jIA7S3A root@ubuntu-xenial (DSA)
-<14>Jan  5 16:14:32 ec2: 256 SHA256:1nBvcAexaclqSG7J3p4OcuHXE+nUv+DKdu9azfAiHsY root@ubuntu-xenial (ECDSA)
-<14>Jan  5 16:14:32 ec2: 256 SHA256:WevfbOxyoj5EQcQlZZgjb837ryf2xnCYO0X+hlTQCdo root@ubuntu-xenial (ED25519)
-<14>Jan  5 16:14:32 ec2: 2048 SHA256:1r0tdS1QR4ImshlrFZIvXDQqY0KWjyjXczKjivNyCRY root@ubuntu-xenial (RSA)
-<14>Jan  5 16:14:32 ec2: -----END SSH HOST KEY FINGERPRINTS-----
-<14>Jan  5 16:14:32 ec2: #############################################################
------BEGIN SSH HOST KEY KEYS-----
-ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMS1bABsQpmvG0gzBlBXQmxzQMXcG9KET8LdnlK46CY7QAQ0hJbet5zO+CjLp2PnAaEn95xpARuM0qTQRy1V/P0= root@ubuntu-xenial
-ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMN2qemWl4hWvtPOW7sF9jD+IgKvVk2B5INTiABEdARH root@ubuntu-xenial
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDjRLsYhNGpLXbb57MNfWvYSTr4jVSxrRlLU/H02WbbfvjxQT68jSfa8EZ/yEcvLNLe+YjtBAsWq24QIMAkjt1LweWECPVIUJGGXmLJjghFoHZS1p33mSBpJXuD+a4AB6gPKRNGBIzoGcjpdFuYTtluz/Z3ler3VUbfvkU9vbwYQVPbSnyyPMEXn2X8A4TjCY3X6Hu5OQ4gs0wDf7Cp8dLcoga+6kuJEM1Gzwyn3Svzp/hyNGgW1cD+QH+I+HY/+ce0baaOSPWYFY/JKPCO8gTzshRD1jqlEFgu5J6Scw06hF7ar+FwLgdayRm3bDT7Rp/fknbCmu09BpXbshHFiN5n root@ubuntu-xenial
------END SSH HOST KEY KEYS-----
-[   38.538544] cloud-init[1332]: Cloud-init v. 0.7.9 running 'modules:final' at Fri, 05 Jan 2018 16:14:31 +0000. Up 38.28 seconds.
-[   38.538706] cloud-init[1332]: ci-info: no authorized ssh keys fingerprints found for user ubuntu.
-[   38.538826] cloud-init[1332]: Cloud-init v. 0.7.9 finished at Fri, 05 Jan 2018 16:14:32 +0000. Datasource DataSourceNoCloud [seed=/dev/sdb][dsmode=net].  Up 38.52 seconds
 

 Ubuntu 16.04.3 LTS ubuntu-xenial ttyS0
 
-ubuntu-xenial login: 
\ No newline at end of file
+ubuntu-xenial login: [   37.338134] cloud-init[1067]: Cloud-init v. 0.7.9 running 'modules:config' at Fri, 12 Jan 2018 16:25:35 +0000. Up 30.49 seconds.
+ci-info: no authorized ssh keys fingerprints found for user ubuntu.
+<14>Jan 12 16:25:43 ec2: 
+<14>Jan 12 16:25:43 ec2: #############################################################
+<14>Jan 12 16:25:43 ec2: -----BEGIN SSH HOST KEY FINGERPRINTS-----
+<14>Jan 12 16:25:43 ec2: 1024 SHA256:Gy3BhAUZK67IVjGWoEJ9ZjL1iYQfFjqCZjJu7cuY5II root@ubuntu-xenial (DSA)
+<14>Jan 12 16:25:43 ec2: 256 SHA256:LHPuX+xSX6TWlMG9eA2eVMHUlpNFqBi3/rMRUQSgxSo root@ubuntu-xenial (ECDSA)
+<14>Jan 12 16:25:43 ec2: 256 SHA256:LyvReAP8OAsny5vYKNvrFLrJ1WLKnq2rNa7pXvOGAzE root@ubuntu-xenial (ED25519)
+<14>Jan 12 16:25:43 ec2: 2048 SHA256:V91RkFaDF9+XI2WH7hR1S4znUXSvnC8CpXUUoBdEMHw root@ubuntu-xenial (RSA)
+<14>Jan 12 16:25:43 ec2: -----END SSH HOST KEY FINGERPRINTS-----
+<14>Jan 12 16:25:43 ec2: #############################################################
+-----BEGIN SSH HOST KEY KEYS-----
+ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBGsHFzFvw/8X4/hE5vEkhvcrnggSWs22mCQ7H9e+srZUkuliJgzgN9mNlgHIvzYd91NUwt5oY7UAHLcHtmuIE1I= root@ubuntu-xenial
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIA+EnC3+7yWFVzntwRFn27WMpFRDDA7pQfA7Nr9cbOv7 root@ubuntu-xenial
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCcFX+xZ6aYvmYDV4RJN46CcOOYHxp0GCgrpdrvEPAXFolK9jj2nCmvMYTAM1UgR549wu10wgva6vdghCbBXUeykVjN4FCp3LtX63/kMZ3W7Iw+cYvPl8AoGlMT1qtSVFdY6Fvfhyns79b9doQd/Z+rh5lkEbwu1OO+h5IzQMTcK4Xd8rkDnyoXCd7FhPaP//uToVS0veeO9l7Nnnb+7wZfTaEJg57X7TppkNd5PWef7ChfD3KHLSaRv+c4ut1Nt5iczczDztzbeb2aH/niU6xhYb9JpXtu7TlOhBKFzXvc792AdJgyo9fxF9sLDXWpUQl5CFHjNdmQ2bPRYQjn4o8z root@ubuntu-xenial
+-----END SSH HOST KEY KEYS-----
+[   37.884730] cloud-init[1349]: Cloud-init v. 0.7.9 running 'modules:final' at Fri, 12 Jan 2018 16:25:43 +0000. Up 37.65 seconds.
+[   37.884874] cloud-init[1349]: ci-info: no authorized ssh keys fingerprints found for user ubuntu.
+[   37.884951] cloud-init[1349]: Cloud-init v. 0.7.9 finished at Fri, 12 Jan 2018 16:25:43 +0000. Datasource DataSourceNoCloud [seed=/dev/sdb][dsmode=net].  Up 37.87 seconds