diff --git a/Vagrantfile b/Vagrantfile index 18903e3aa4d55b6264d017496a7b351ede12b6ed..c850860c0cf2ac053ea0d1e814dc8ec3b317c00d 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -94,10 +94,10 @@ Vagrant.configure("2") do |config| config.vm.network "forwarded_port", guest: 9092, host: 9092 # install the CLMC service - config.vm.provision :shell, :path => 'scripts/influx/install-clmc-service.sh' + config.vm.provision :shell, :path => 'scripts/clmc-service/install-clmc-service.sh' # start the CLMC service - config.vm.provision :shell, :path => 'scripts/influx/start-clmc-service.sh' + config.vm.provision :shell, :path => 'scripts/clmc-service/start-clmc-service.sh' end # Apache Server 1 config.vm.define "apache1" do |my| @@ -113,10 +113,10 @@ Vagrant.configure("2") do |config| # install the apache service - config.vm.provision :shell, :path => 'scripts/apache/install-apache.sh' + config.vm.provision :shell, :path => 'test/services/apache/install-apache.sh' # Install CLMC agent 1 - config.vm.provision :shell, :path => 'scripts/influx/install-clmc-agent.sh', :args => "/vagrant/scripts/apache/telegraf_apache_template.conf #{ipendpoints['apache1'][:location]} #{ipendpoints['apache1'][:sfc_id]} #{ipendpoints['apache1'][:sfc_id_instance]} #{ipendpoints['apache1'][:sf_id]} #{ipendpoints['apache1'][:sf_id_instance]} #{ipendpoints['apache1'][:ipendpoint_id]} #{ipendpoints['apache1'][:influxdb_url]} #{ipendpoints['apache1'][:database_name]}" + config.vm.provision :shell, :path => 'scripts/clmc-agent/install-clmc-agent.sh', :args => "/vagrant/test/services/apache/telegraf_apache_template.conf #{ipendpoints['apache1'][:location]} #{ipendpoints['apache1'][:sfc_id]} #{ipendpoints['apache1'][:sfc_id_instance]} #{ipendpoints['apache1'][:sf_id]} #{ipendpoints['apache1'][:sf_id_instance]} #{ipendpoints['apache1'][:ipendpoint_id]} #{ipendpoints['apache1'][:influxdb_url]} #{ipendpoints['apache1'][:database_name]}" end # Apache Server 2 config.vm.define "apache2" do |my| @@ -129,13 +129,13 @@ Vagrant.configure("2") do |config| end # open apache port - config.vm.network "forwarded_port", guest: 80, host: 8081 + config.vm.network "forwarded_port", guest: 80, host: 8082 # install the apache service - config.vm.provision :shell, :path => 'scripts/apache/install-apache.sh' + config.vm.provision :shell, :path => 'test/services/apache/install-apache.sh' # Install CLMC agent - config.vm.provision :shell, :path => 'scripts/influx/install-clmc-agent.sh', :args => "/vagrant/scripts/apache/telegraf_apache_template.conf #{ipendpoints['apache2'][:location]} #{ipendpoints['apache2'][:sfc_id]} #{ipendpoints['apache2'][:sfc_id_instance]} #{ipendpoints['apache2'][:sf_id]} #{ipendpoints['apache2'][:sf_id_instance]} #{ipendpoints['apache2'][:ipendpoint_id]} #{ipendpoints['apache2'][:influxdb_url]} #{ipendpoints['apache2'][:database_name]}" + config.vm.provision :shell, :path => 'scripts/clmc-agent/install-clmc-agent.sh', :args => "/vagrant/test/services/apache/telegraf_apache_template.conf #{ipendpoints['apache2'][:location]} #{ipendpoints['apache2'][:sfc_id]} #{ipendpoints['apache2'][:sfc_id_instance]} #{ipendpoints['apache2'][:sf_id]} #{ipendpoints['apache2'][:sf_id_instance]} #{ipendpoints['apache2'][:ipendpoint_id]} #{ipendpoints['apache2'][:influxdb_url]} #{ipendpoints['apache2'][:database_name]}" end # NGINX VM @@ -153,10 +153,10 @@ Vagrant.configure("2") do |config| config.vm.network "forwarded_port", guest: 80, host: 8083 # install the apache service - config.vm.provision :shell, :path => 'scripts/nginx/install-nginx.sh' + 
config.vm.provision :shell, :path => 'test/services/nginx/install-nginx.sh' # Install CLMC agent - config.vm.provision :shell, :path => 'scripts/influx/install-clmc-agent.sh', :args => "/vagrant/scripts/nginx/telegraf_nginx_template.conf #{ipendpoints['nginx1'][:location]} #{ipendpoints['nginx1'][:sfc_id]} #{ipendpoints['nginx1'][:sfc_id_instance]} #{ipendpoints['nginx1'][:sf_id]} #{ipendpoints['nginx1'][:sf_id_instance]} #{ipendpoints['nginx1'][:ipendpoint_id]} #{ipendpoints['nginx1'][:influxdb_url]} #{ipendpoints['nginx1'][:database_name]}" + config.vm.provision :shell, :path => 'scripts/clmc-agent/install-clmc-agent.sh', :args => "/vagrant/test/services/nginx/telegraf_nginx_template.conf #{ipendpoints['nginx1'][:location]} #{ipendpoints['nginx1'][:sfc_id]} #{ipendpoints['nginx1'][:sfc_id_instance]} #{ipendpoints['nginx1'][:sf_id]} #{ipendpoints['nginx1'][:sf_id_instance]} #{ipendpoints['nginx1'][:ipendpoint_id]} #{ipendpoints['nginx1'][:influxdb_url]} #{ipendpoints['nginx1'][:database_name]}" end # MONGODB VM @@ -173,10 +173,10 @@ Vagrant.configure("2") do |config| config.vm.network "forwarded_port", guest: 80, host: 8084 # install the apache service - config.vm.provision :shell, :path => 'scripts/mongo/install-mongo.sh' + config.vm.provision :shell, :path => 'test/services/mongo/install-mongo.sh' # Install CLMC agent - config.vm.provision :shell, :path => 'scripts/influx/install-clmc-agent.sh', :args => "/vagrant/scripts/mongo/telegraf_mongo_template.conf #{ipendpoints['mongo1'][:location]} #{ipendpoints['mongo1'][:sfc_id]} #{ipendpoints['mongo1'][:sfc_id_instance]} #{ipendpoints['mongo1'][:sf_id]} #{ipendpoints['mongo1'][:sf_id_instance]} #{ipendpoints['mongo1'][:ipendpoint_id]} #{ipendpoints['mongo1'][:influxdb_url]} #{ipendpoints['mongo1'][:database_name]}" + config.vm.provision :shell, :path => 'scripts/clmc-agent/install-clmc-agent.sh', :args => "/vagrant/test/services/mongo/telegraf_mongo_template.conf #{ipendpoints['mongo1'][:location]} #{ipendpoints['mongo1'][:sfc_id]} #{ipendpoints['mongo1'][:sfc_id_instance]} #{ipendpoints['mongo1'][:sf_id]} #{ipendpoints['mongo1'][:sf_id_instance]} #{ipendpoints['mongo1'][:ipendpoint_id]} #{ipendpoints['mongo1'][:influxdb_url]} #{ipendpoints['mongo1'][:database_name]}" end diff --git a/scripts/apache/install-apache.sh b/scripts/apache/install-apache.sh deleted file mode 100644 index 735fc0a46e4dbe491ce82edba7b5aeb17d84c005..0000000000000000000000000000000000000000 --- a/scripts/apache/install-apache.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -#///////////////////////////////////////////////////////////////////////// -#// -#// (c) University of Southampton IT Innovation Centre, 2017 -#// -#// Copyright in this software belongs to University of Southampton -#// IT Innovation Centre of Gamma House, Enterprise Road, -#// Chilworth Science Park, Southampton, SO16 7NS, UK. -#// -#// This software may not be used, sold, licensed, transferred, copied -#// or reproduced in whole or in part in any manner or form or in or -#// on any media by any person other than in accordance with the terms -#// of the Licence Agreement supplied with the software, or otherwise -#// without the prior written consent of the copyright owners. -#// -#// This software is distributed WITHOUT ANY WARRANTY, without even the -#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -#// PURPOSE, except where stated in the Licence Agreement supplied with -#// the software. 
-#// -#// Created By : Michael Boniface -#// Created Date : 23/01/2018 -#// Created for Project : FLAME -#// -#///////////////////////////////////////////////////////////////////////// - -# Install apache -sudo apt-get update -sudo apt-get -y install apache2 \ No newline at end of file diff --git a/scripts/apache/telegraf_apache_template.conf b/scripts/apache/telegraf_apache_template.conf deleted file mode 100644 index 99f334996bd9f67a4465cd27950978a2803d69d4..0000000000000000000000000000000000000000 --- a/scripts/apache/telegraf_apache_template.conf +++ /dev/null @@ -1,133 +0,0 @@ -# Telegraf configuration - -# Telegraf is entirely plugin driven. All metrics are gathered from the -# declared inputs, and sent to the declared outputs. - -# Plugins must be declared in here to be active. -# To deactivate a plugin, comment out the name and any variables. - -# Use 'telegraf -config telegraf.conf -test' to see what metrics a config -# file would generate. - -# Global tags can be specified here in key="value" format. -[global_tags] - # location of the data centre - location="{{LOCATION}}" - # media service template id - sfc="{{SFC_ID}}" - # media service instance - sfc_i="{{SFC_ID_INSTANCE}}" - # service function type - sf="{{SF_ID}}" - # service function instance id - sf_i="{{SF_ID_INSTANCE}}" - # ipendpoint id aka surrogate instance - ipendpoint="{{IP_ENDPOINT_ID}}" - -# Configuration for telegraf agent -[agent] - ## Default data collection interval for all inputs - interval = "10s" - ## Rounds collection interval to 'interval' - ## ie, if interval="10s" then always collect on :00, :10, :20, etc. - round_interval = true - - ## Telegraf will cache metric_buffer_limit metrics for each output, and will - ## flush this buffer on a successful write. - metric_buffer_limit = 1000 - ## Flush the buffer whenever full, regardless of flush_interval. - flush_buffer_when_full = true - - ## Collection jitter is used to jitter the collection by a random amount. - ## Each plugin will sleep for a random time within jitter before collecting. - ## This can be used to avoid many plugins querying things like sysfs at the - ## same time, which can have a measurable effect on the system. - collection_jitter = "0s" - - ## Default flushing interval for all outputs. You shouldn't set this below - ## interval. Maximum flush_interval will be flush_interval + flush_jitter - flush_interval = "10s" - ## Jitter the flush interval by a random amount. This is primarily to avoid - ## large write spikes for users running a large number of telegraf instances. - ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - flush_jitter = "0s" - - ## Logging configuration: - ## Run telegraf in debug mode - debug = false - ## Run telegraf in quiet mode - quiet = false - ## Specify the log file name. The empty string means to log to stdout. - logfile = "G:/Telegraf/telegraf.log" - - ## Override default hostname, if empty use os.Hostname() - hostname = "" - - -############################################################################### -# OUTPUTS # -############################################################################### - -# Configuration for influxdb server to send metrics to -[[outputs.influxdb]] - # The full HTTP or UDP endpoint URL for your InfluxDB instance. - # Multiple urls can be specified but it is assumed that they are part of the same - # cluster, this means that only ONE of the urls will be written to each interval. 
- # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example - urls = ["{{INFLUXDB_URL}}"] # required - # The target database for metrics (telegraf will create it if not exists) - database = "{{DATABASE_NAME}}" # required - # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h". - # note: using second precision greatly helps InfluxDB compression - precision = "s" - - ## Write timeout (for the InfluxDB client), formatted as a string. - ## If not provided, will default to 5s. 0s means no timeout (not recommended). - timeout = "5s" - # username = "telegraf" - # password = "metricsmetricsmetricsmetrics" - # Set the user agent for HTTP POSTs (can be useful for log differentiation) - # user_agent = "telegraf" - # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes) - # udp_payload = 512 - - -############################################################################### -# INPUTS # -############################################################################### -# # Influx HTTP write listener -[[inputs.http_listener]] - ## Address and port to host HTTP listener on - service_address = ":8186" - - ## timeouts - read_timeout = "10s" - write_timeout = "10s" - - ## HTTPS - #tls_cert= "/etc/telegraf/cert.pem" - #tls_key = "/etc/telegraf/key.pem" - - ## MTLS - #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] - -# Read Apache status information (mod_status) -[[inputs.apache]] - ## An array of URLs to gather from, must be directed at the machine - ## readable version of the mod_status page including the auto query string. - ## Default is "http://localhost/server-status?auto". - urls = ["http://localhost/server-status?auto"] - - ## Credentials for basic HTTP authentication. - # username = "myuser" - # password = "mypassword" - - ## Maximum time to receive response. - # response_timeout = "5s" - - ## Optional SSL Config - # ssl_ca = "/etc/telegraf/ca.pem" - # ssl_cert = "/etc/telegraf/cert.pem" - # ssl_key = "/etc/telegraf/key.pem" - ## Use SSL but skip chain & host verification - # insecure_skip_verify = false \ No newline at end of file diff --git a/scripts/influx/install-clmc-agent.sh b/scripts/influx/install-clmc-agent.sh deleted file mode 100755 index ab3d0bdcecd807e2323da45807e62c8eb2a17060..0000000000000000000000000000000000000000 --- a/scripts/influx/install-clmc-agent.sh +++ /dev/null @@ -1,70 +0,0 @@ -#!/bin/bash -#///////////////////////////////////////////////////////////////////////// -#// -#// (c) University of Southampton IT Innovation Centre, 2017 -#// -#// Copyright in this software belongs to University of Southampton -#// IT Innovation Centre of Gamma House, Enterprise Road, -#// Chilworth Science Park, Southampton, SO16 7NS, UK. -#// -#// This software may not be used, sold, licensed, transferred, copied -#// or reproduced in whole or in part in any manner or form or in or -#// on any media by any person other than in accordance with the terms -#// of the Licence Agreement supplied with the software, or otherwise -#// without the prior written consent of the copyright owners. -#// -#// This software is distributed WITHOUT ANY WARRANTY, without even the -#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -#// PURPOSE, except where stated in the Licence Agreement supplied with -#// the software. 
-#// -#// Created By : Michael Boniface -#// Created Date : 13/12/2017 -#// Created for Project : FLAME -#// -#///////////////////////////////////////////////////////////////////////// - -# Install telegraf -if [ "$#" -ne 9 ]; then - echo "Error: illegal number of arguments: "$# - echo "Usage: install-clmc-agent.sh TELEGRAF_CONF_FILE LOCATION SFC_ID SFC_ID_INSTANCE SF_ID SF_ID_INSTANCE IP_ENDPOINT_ID INFLUXDB_URL DATABASE_NAME" - exit 1 -fi - -TELEGRAF_CONF_FILE=$1 -LOCATION=$2 -SFC_ID=$3 -SFC_ID_INSTANCE=$4 -SF_ID=$5 -SF_ID_INSTANCE=$6 -IP_ENDPOINT_ID=$7 -INFLUXDB_URL=$8 -DATABASE_NAME=$9 - -if [ ! -f $TELEGRAF_CONF_FILE ]; then - echo "Error: Telegraf conf template file not found: "$TELEGRAF_CONF_FILE - exit 1 -fi - -wget https://dl.influxdata.com/telegraf/releases/telegraf_1.3.2-1_amd64.deb -dpkg -i telegraf_1.3.2-1_amd64.deb - -# Copy configuration -echo "Telegraf config file: " $TELEGRAF_CONF_FILE -cp $TELEGRAF_CONF_FILE /etc/telegraf/telegraf.conf - -echo "INFLUXDB_URL: " $INFLUXDB_URL -echo "DATABASE_NAME: " $DATABASE_NAME - -# Replace template parameters -sed -i 's/{{LOCATION}}/'$LOCATION'/g' /etc/telegraf/telegraf.conf -sed -i 's/{{SFC_ID}}/'$SFC_ID'/g' /etc/telegraf/telegraf.conf -sed -i 's/{{SFC_ID_INSTANCE}}/'$SFC_ID_INSTANCE'/g' /etc/telegraf/telegraf.conf -sed -i 's/{{SF_ID}}/'$SF_ID'/g' /etc/telegraf/telegraf.conf -sed -i 's/{{SF_ID_INSTANCE}}/'$SF_ID_INSTANCE'/g' /etc/telegraf/telegraf.conf -sed -i 's/{{IP_ENDPOINT_ID}}/'$IP_ENDPOINT_ID'/g' /etc/telegraf/telegraf.conf -sed -i 's|{{INFLUXDB_URL}}|'$INFLUXDB_URL'|g' /etc/telegraf/telegraf.conf -sed -i 's/{{DATABASE_NAME}}/'$DATABASE_NAME'/g' /etc/telegraf/telegraf.conf - -# Start telegraf -systemctl start telegraf \ No newline at end of file diff --git a/scripts/influx/install-clmc-service.sh b/scripts/influx/install-clmc-service.sh deleted file mode 100755 index 42e247ad17b20eba67cdc425891446d8ee83ea99..0000000000000000000000000000000000000000 --- a/scripts/influx/install-clmc-service.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -#///////////////////////////////////////////////////////////////////////// -#// -#// (c) University of Southampton IT Innovation Centre, 2017 -#// -#// Copyright in this software belongs to University of Southampton -#// IT Innovation Centre of Gamma House, Enterprise Road, -#// Chilworth Science Park, Southampton, SO16 7NS, UK. -#// -#// This software may not be used, sold, licensed, transferred, copied -#// or reproduced in whole or in part in any manner or form or in or -#// on any media by any person other than in accordance with the terms -#// of the Licence Agreement supplied with the software, or otherwise -#// without the prior written consent of the copyright owners. -#// -#// This software is distributed WITHOUT ANY WARRANTY, without even the -#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -#// PURPOSE, except where stated in the Licence Agreement supplied with -#// the software. 
-#// -#// Created By : Michael Boniface -#// Created Date : 13/12/2017 -#// Created for Project : FLAME -#// -#///////////////////////////////////////////////////////////////////////// - -# install python for the simulator -apt-get update -apt-get -y install python - -# install influx -wget https://dl.influxdata.com/influxdb/releases/influxdb_1.2.4_amd64.deb -dpkg -i influxdb_1.2.4_amd64.deb - -# install kapacitor -wget https://dl.influxdata.com/kapacitor/releases/kapacitor_1.3.1_amd64.deb -dpkg -i kapacitor_1.3.1_amd64.deb - -# install Chronograf -wget https://dl.influxdata.com/chronograf/releases/chronograf_1.3.3.0_amd64.deb -dpkg -i chronograf_1.3.3.0_amd64.deb diff --git a/scripts/influx/start-clmc-service.sh b/scripts/influx/start-clmc-service.sh deleted file mode 100755 index f92c6b5eaf0c93b5a98585b4aab4182d09e2360e..0000000000000000000000000000000000000000 --- a/scripts/influx/start-clmc-service.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -#///////////////////////////////////////////////////////////////////////// -#// -#// (c) University of Southampton IT Innovation Centre, 2018 -#// -#// Copyright in this software belongs to University of Southampton -#// IT Innovation Centre of Gamma House, Enterprise Road, -#// Chilworth Science Park, Southampton, SO16 7NS, UK. -#// -#// This software may not be used, sold, licensed, transferred, copied -#// or reproduced in whole or in part in any manner or form or in or -#// on any media by any person other than in accordance with the terms -#// of the Licence Agreement supplied with the software, or otherwise -#// without the prior written consent of the copyright owners. -#// -#// This software is distributed WITHOUT ANY WARRANTY, without even the -#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -#// PURPOSE, except where stated in the Licence Agreement supplied with -#// the software. -#// -#// Created By : Simon Crowle -#// Created Date : 03/11/2018 -#// Created for Project : FLAME -#// -#///////////////////////////////////////////////////////////////////////// - -echo Starting TICK stack services... - -systemctl start influxdb -systemctl start kapacitor -systemctl start chronograf \ No newline at end of file diff --git a/scripts/influx/telegraf_ipendpoint_template.conf b/scripts/influx/telegraf_ipendpoint_template.conf deleted file mode 100644 index 2358dcca5bfcd48d4b45e0e1ccd316357f1e4ba7..0000000000000000000000000000000000000000 --- a/scripts/influx/telegraf_ipendpoint_template.conf +++ /dev/null @@ -1,112 +0,0 @@ -# Telegraf configuration - -# Telegraf is entirely plugin driven. All metrics are gathered from the -# declared inputs, and sent to the declared outputs. - -# Plugins must be declared in here to be active. -# To deactivate a plugin, comment out the name and any variables. - -# Use 'telegraf -config telegraf.conf -test' to see what metrics a config -# file would generate. - -# Global tags can be specified here in key="value" format. 
-[global_tags] - # location of the data centre - location="{{LOCATION}}" - # media service template id - sfc="{{SFC_ID}}" - # media service instance - sfc_i="{{SFC_ID_INSTANCE}}" - # service function type - sf="{{SF_ID}}" - # service function instance id - sf_i="{{SF_ID_INSTANCE}}" - # ipendpoint id aka surrogate instance - ipendpoint="{{IP_ENDPOINT_ID}}" - -# Configuration for telegraf agent -[agent] - ## Default data collection interval for all inputs - interval = "10s" - ## Rounds collection interval to 'interval' - ## ie, if interval="10s" then always collect on :00, :10, :20, etc. - round_interval = true - - ## Telegraf will cache metric_buffer_limit metrics for each output, and will - ## flush this buffer on a successful write. - metric_buffer_limit = 1000 - ## Flush the buffer whenever full, regardless of flush_interval. - flush_buffer_when_full = true - - ## Collection jitter is used to jitter the collection by a random amount. - ## Each plugin will sleep for a random time within jitter before collecting. - ## This can be used to avoid many plugins querying things like sysfs at the - ## same time, which can have a measurable effect on the system. - collection_jitter = "0s" - - ## Default flushing interval for all outputs. You shouldn't set this below - ## interval. Maximum flush_interval will be flush_interval + flush_jitter - flush_interval = "10s" - ## Jitter the flush interval by a random amount. This is primarily to avoid - ## large write spikes for users running a large number of telegraf instances. - ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - flush_jitter = "0s" - - ## Logging configuration: - ## Run telegraf in debug mode - debug = false - ## Run telegraf in quiet mode - quiet = false - ## Specify the log file name. The empty string means to log to stdout. - logfile = "G:/Telegraf/telegraf.log" - - ## Override default hostname, if empty use os.Hostname() - hostname = "" - - -############################################################################### -# OUTPUTS # -############################################################################### - -# Configuration for influxdb server to send metrics to -[[outputs.influxdb]] - # The full HTTP or UDP endpoint URL for your InfluxDB instance. - # Multiple urls can be specified but it is assumed that they are part of the same - # cluster, this means that only ONE of the urls will be written to each interval. - # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example - urls = ["{{INFLUXDB_URL}}"] # required - # The target database for metrics (telegraf will create it if not exists) - database = "{{DATABASE_NAME}}" # required - # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h". - # note: using second precision greatly helps InfluxDB compression - precision = "s" - - ## Write timeout (for the InfluxDB client), formatted as a string. - ## If not provided, will default to 5s. 0s means no timeout (not recommended). 
- timeout = "5s" - # username = "telegraf" - # password = "metricsmetricsmetricsmetrics" - # Set the user agent for HTTP POSTs (can be useful for log differentiation) - # user_agent = "telegraf" - # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes) - # udp_payload = 512 - - -############################################################################### -# INPUTS # -############################################################################### -# # Influx HTTP write listener -[[inputs.http_listener]] - ## Address and port to host HTTP listener on - service_address = ":8186" - - ## timeouts - read_timeout = "10s" - write_timeout = "10s" - - ## HTTPS - #tls_cert= "/etc/telegraf/cert.pem" - #tls_key = "/etc/telegraf/key.pem" - - ## MTLS - #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] diff --git a/scripts/mongo/install-mongo.sh b/scripts/mongo/install-mongo.sh deleted file mode 100644 index 25797d14568ba3fbc8b84dec1d2f5e969a861180..0000000000000000000000000000000000000000 --- a/scripts/mongo/install-mongo.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -#///////////////////////////////////////////////////////////////////////// -#// -#// (c) University of Southampton IT Innovation Centre, 2017 -#// -#// Copyright in this software belongs to University of Southampton -#// IT Innovation Centre of Gamma House, Enterprise Road, -#// Chilworth Science Park, Southampton, SO16 7NS, UK. -#// -#// This software may not be used, sold, licensed, transferred, copied -#// or reproduced in whole or in part in any manner or form or in or -#// on any media by any person other than in accordance with the terms -#// of the Licence Agreement supplied with the software, or otherwise -#// without the prior written consent of the copyright owners. -#// -#// This software is distributed WITHOUT ANY WARRANTY, without even the -#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -#// PURPOSE, except where stated in the Licence Agreement supplied with -#// the software. -#// -#// Created By : Michael Boniface -#// Created Date : 23/01/2018 -#// Created for Project : FLAME -#// -#///////////////////////////////////////////////////////////////////////// - -# Install apache -sudo apt-get update -sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 2930ADAE8CAF5059EE73BB4B58712A2291FA4AD5 -echo "deb [ arch=amd64,arm64 ] https://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.6 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.6.list -sudo apt-get update -sudo apt-get install -y mongodb-org diff --git a/scripts/mongo/telegraf_mongo_template.conf b/scripts/mongo/telegraf_mongo_template.conf deleted file mode 100644 index e65c22f60894f586a0da06038b085885235aba63..0000000000000000000000000000000000000000 --- a/scripts/mongo/telegraf_mongo_template.conf +++ /dev/null @@ -1,128 +0,0 @@ -# Telegraf configuration - -# Telegraf is entirely plugin driven. All metrics are gathered from the -# declared inputs, and sent to the declared outputs. - -# Plugins must be declared in here to be active. -# To deactivate a plugin, comment out the name and any variables. - -# Use 'telegraf -config telegraf.conf -test' to see what metrics a config -# file would generate. - -# Global tags can be specified here in key="value" format. 
-[global_tags] - # location of the data centre - location="{{LOCATION}}" - # media service template id - sfc="{{SFC_ID}}" - # media service instance - sfc_i="{{SFC_ID_INSTANCE}}" - # service function type - sf="{{SF_ID}}" - # service function instance id - sf_i="{{SF_ID_INSTANCE}}" - # ipendpoint id aka surrogate instance - ipendpoint="{{IP_ENDPOINT_ID}}" - -# Configuration for telegraf agent -[agent] - ## Default data collection interval for all inputs - interval = "10s" - ## Rounds collection interval to 'interval' - ## ie, if interval="10s" then always collect on :00, :10, :20, etc. - round_interval = true - - ## Telegraf will cache metric_buffer_limit metrics for each output, and will - ## flush this buffer on a successful write. - metric_buffer_limit = 1000 - ## Flush the buffer whenever full, regardless of flush_interval. - flush_buffer_when_full = true - - ## Collection jitter is used to jitter the collection by a random amount. - ## Each plugin will sleep for a random time within jitter before collecting. - ## This can be used to avoid many plugins querying things like sysfs at the - ## same time, which can have a measurable effect on the system. - collection_jitter = "0s" - - ## Default flushing interval for all outputs. You shouldn't set this below - ## interval. Maximum flush_interval will be flush_interval + flush_jitter - flush_interval = "10s" - ## Jitter the flush interval by a random amount. This is primarily to avoid - ## large write spikes for users running a large number of telegraf instances. - ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - flush_jitter = "0s" - - ## Logging configuration: - ## Run telegraf in debug mode - debug = false - ## Run telegraf in quiet mode - quiet = false - ## Specify the log file name. The empty string means to log to stdout. - logfile = "G:/Telegraf/telegraf.log" - - ## Override default hostname, if empty use os.Hostname() - hostname = "" - - -############################################################################### -# OUTPUTS # -############################################################################### - -# Configuration for influxdb server to send metrics to -[[outputs.influxdb]] - # The full HTTP or UDP endpoint URL for your InfluxDB instance. - # Multiple urls can be specified but it is assumed that they are part of the same - # cluster, this means that only ONE of the urls will be written to each interval. - # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example - urls = ["{{INFLUXDB_URL}}"] # required - # The target database for metrics (telegraf will create it if not exists) - database = "{{DATABASE_NAME}}" # required - # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h". - # note: using second precision greatly helps InfluxDB compression - precision = "s" - - ## Write timeout (for the InfluxDB client), formatted as a string. - ## If not provided, will default to 5s. 0s means no timeout (not recommended). 
- timeout = "5s" - # username = "telegraf" - # password = "metricsmetricsmetricsmetrics" - # Set the user agent for HTTP POSTs (can be useful for log differentiation) - # user_agent = "telegraf" - # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes) - # udp_payload = 512 - - -############################################################################### -# INPUTS # -############################################################################### -# # Influx HTTP write listener -[[inputs.http_listener]] - ## Address and port to host HTTP listener on - service_address = ":8186" - - ## timeouts - read_timeout = "10s" - write_timeout = "10s" - - ## HTTPS - #tls_cert= "/etc/telegraf/cert.pem" - #tls_key = "/etc/telegraf/key.pem" - - ## MTLS - #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] - -[[inputs.mongodb]] - ## An array of URLs of the form: - ## "mongodb://" [user ":" pass "@"] host [ ":" port] - ## For example: - ## mongodb://user:auth_key@10.10.3.30:27017, - ## mongodb://10.10.3.33:18832, - servers = ["mongodb://127.0.0.1:27017"] - gather_perdb_stats = false - - ## Optional SSL Config - # ssl_ca = "/etc/telegraf/ca.pem" - # ssl_cert = "/etc/telegraf/cert.pem" - # ssl_key = "/etc/telegraf/key.pem" - ## Use SSL but skip chain & host verification - # insecure_skip_verify = false \ No newline at end of file diff --git a/scripts/nginx/install-nginx.sh b/scripts/nginx/install-nginx.sh deleted file mode 100644 index a6e00d97074a8ffa82d538927db01604b97a56f4..0000000000000000000000000000000000000000 --- a/scripts/nginx/install-nginx.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -#///////////////////////////////////////////////////////////////////////// -#// -#// (c) University of Southampton IT Innovation Centre, 2017 -#// -#// Copyright in this software belongs to University of Southampton -#// IT Innovation Centre of Gamma House, Enterprise Road, -#// Chilworth Science Park, Southampton, SO16 7NS, UK. -#// -#// This software may not be used, sold, licensed, transferred, copied -#// or reproduced in whole or in part in any manner or form or in or -#// on any media by any person other than in accordance with the terms -#// of the Licence Agreement supplied with the software, or otherwise -#// without the prior written consent of the copyright owners. -#// -#// This software is distributed WITHOUT ANY WARRANTY, without even the -#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -#// PURPOSE, except where stated in the Licence Agreement supplied with -#// the software. -#// -#// Created By : Michael Boniface -#// Created Date : 23/01/2018 -#// Created for Project : FLAME -#// -#///////////////////////////////////////////////////////////////////////// - -# Install apache -sudo apt-get update -yes Y | sudo apt-get install nginx \ No newline at end of file diff --git a/scripts/nginx/telegraf_nginx_template.conf b/scripts/nginx/telegraf_nginx_template.conf deleted file mode 100644 index 31c97d16ee761205e56d77adba38f74d950705bb..0000000000000000000000000000000000000000 --- a/scripts/nginx/telegraf_nginx_template.conf +++ /dev/null @@ -1,127 +0,0 @@ -# Telegraf configuration - -# Telegraf is entirely plugin driven. All metrics are gathered from the -# declared inputs, and sent to the declared outputs. - -# Plugins must be declared in here to be active. -# To deactivate a plugin, comment out the name and any variables. - -# Use 'telegraf -config telegraf.conf -test' to see what metrics a config -# file would generate. 
- -# Global tags can be specified here in key="value" format. -[global_tags] - # location of the data centre - location="{{LOCATION}}" - # media service template id - sfc="{{SFC_ID}}" - # media service instance - sfc_i="{{SFC_ID_INSTANCE}}" - # service function type - sf="{{SF_ID}}" - # service function instance id - sf_i="{{SF_ID_INSTANCE}}" - # ipendpoint id aka surrogate instance - ipendpoint="{{IP_ENDPOINT_ID}}" - -# Configuration for telegraf agent -[agent] - ## Default data collection interval for all inputs - interval = "10s" - ## Rounds collection interval to 'interval' - ## ie, if interval="10s" then always collect on :00, :10, :20, etc. - round_interval = true - - ## Telegraf will cache metric_buffer_limit metrics for each output, and will - ## flush this buffer on a successful write. - metric_buffer_limit = 1000 - ## Flush the buffer whenever full, regardless of flush_interval. - flush_buffer_when_full = true - - ## Collection jitter is used to jitter the collection by a random amount. - ## Each plugin will sleep for a random time within jitter before collecting. - ## This can be used to avoid many plugins querying things like sysfs at the - ## same time, which can have a measurable effect on the system. - collection_jitter = "0s" - - ## Default flushing interval for all outputs. You shouldn't set this below - ## interval. Maximum flush_interval will be flush_interval + flush_jitter - flush_interval = "10s" - ## Jitter the flush interval by a random amount. This is primarily to avoid - ## large write spikes for users running a large number of telegraf instances. - ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - flush_jitter = "0s" - - ## Logging configuration: - ## Run telegraf in debug mode - debug = false - ## Run telegraf in quiet mode - quiet = false - ## Specify the log file name. The empty string means to log to stdout. - logfile = "G:/Telegraf/telegraf.log" - - ## Override default hostname, if empty use os.Hostname() - hostname = "" - - -############################################################################### -# OUTPUTS # -############################################################################### - -# Configuration for influxdb server to send metrics to -[[outputs.influxdb]] - # The full HTTP or UDP endpoint URL for your InfluxDB instance. - # Multiple urls can be specified but it is assumed that they are part of the same - # cluster, this means that only ONE of the urls will be written to each interval. - # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example - urls = ["{{INFLUXDB_URL}}"] # required - # The target database for metrics (telegraf will create it if not exists) - database = "{{DATABASE_NAME}}" # required - # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h". - # note: using second precision greatly helps InfluxDB compression - precision = "s" - - ## Write timeout (for the InfluxDB client), formatted as a string. - ## If not provided, will default to 5s. 0s means no timeout (not recommended). 
- timeout = "5s" - # username = "telegraf" - # password = "metricsmetricsmetricsmetrics" - # Set the user agent for HTTP POSTs (can be useful for log differentiation) - # user_agent = "telegraf" - # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes) - # udp_payload = 512 - - -############################################################################### -# INPUTS # -############################################################################### -# # Influx HTTP write listener -[[inputs.http_listener]] - ## Address and port to host HTTP listener on - service_address = ":8186" - - ## timeouts - read_timeout = "10s" - write_timeout = "10s" - - ## HTTPS - #tls_cert= "/etc/telegraf/cert.pem" - #tls_key = "/etc/telegraf/key.pem" - - ## MTLS - #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] - -# Read Nginx's basic status information (ngx_http_stub_status_module) -[[inputs.nginx]] - ## An array of Nginx stub_status URI to gather stats. - urls = ["http://localhost/server_status"] - - ## Optional SSL Config - # ssl_ca = "/etc/telegraf/ca.pem" - # ssl_cert = "/etc/telegraf/cert.pem" - # ssl_key = "/etc/telegraf/key.pem" - ## Use SSL but skip chain & host verification - # insecure_skip_verify = false - - ## HTTP response timeout (default: 5s) - response_timeout = "5s" \ No newline at end of file diff --git a/src/mediaServiceSim/LineProtocolGenerator.py b/src/mediaServiceSim/LineProtocolGenerator.py deleted file mode 100644 index 3d4b07736b3fa3b318754d411aaeb1d91aa2f537..0000000000000000000000000000000000000000 --- a/src/mediaServiceSim/LineProtocolGenerator.py +++ /dev/null @@ -1,307 +0,0 @@ -# line protocol - -# Method to create a full InfluxDB request statement (based on partial statement from client) -import uuid -from random import random, randint - - -# Reports TX and RX, scaling on requested quality -def generate_network_report(recieved_bytes, sent_bytes, time): - # Measurement - result = 'net_port_io' - # Tags - result += ',port_id=enps03 ' - # Fields - result += 'RX_BYTES_PORT_M=' + str(recieved_bytes) + "," - result += 'TX_BYTES_PORT_M=' + str(sent_bytes) - # Timestamp - result += ' ' + str(_getNSTime(time)) - - # Measurement - #print(result) - return result - - -# Formats VM config -def generate_vm_config(state, cpu, mem, storage, time): - # metric - result = 'vm_res_alloc' - # Tags - result += ',vm_state=' + quote_wrap(state) - result += ' ' - # Fields - result += 'cpu=' + str(cpu) - result += ',memory=' + quote_wrap(mem) - result += ',storage=' + quote_wrap(storage) - - # Time - result += ' ' + str(_getNSTime(time)) - - print(result) - return result - - -# Reports cpu usage, scaling on requests -def generate_cpu_report(cpu_usage, cpu_active_time, cpu_idle_time, time): - result = 'cpu_usage' - # Tag - result += ' ' - # field - result += 'cpu_usage='+str(cpu_usage) - result += ',cpu_active_time='+str(cpu_active_time) - result += ',cpu_idle_time='+str(cpu_idle_time) - result += ' ' - # Time - result += str(_getNSTime(time)) - print(result) - return result - - -# Reports response times, scaling on number of requests -def generate_mpegdash_report(resource, requests, avg_response_time, peak_response_time, time): - # Measurement - result = 'mpegdash_service' - # Tags - result += ',cont_nav=\"' + str(resource) + "\" " - # Fields - - # result += 'cont_rep=' + str(quality) + ',' - result += 'requests=' + str(requests) + ',' - result += 'avg_response_time=' + str(avg_response_time) + ',' - result += 'peak_response_time=' + str(peak_response_time) - # Timestamp - result 
+= ' ' + str(_getNSTime(time)) - print(result) - return result - -#ipendpoint_route,ipendpoint_id,cont_nav=FQDN HTTP_REQUESTS_FQDN_M, NETWORK_FQDN_LATENCY timestamp -def generate_ipendpoint_route(resource, requests, latency, time): - # Measurement - result = 'ipendpoint_route' - # Tags - result += ',cont_nav=\"' + str(resource) + "\" " - # Fields - - # result += 'cont_rep=' + str(quality) + ',' - result += 'http_requests_fqdn_m=' + str(requests) + ',' - result += 'network_fqdn_latency=' + str(latency) - # Timestamp - result += ' ' + str(_getNSTime(time)) - #print(result) - return result - -# Influx needs strings to be quoted, this provides a utility interface to do this -def quote_wrap(str): - return "\"" + str + "\"" - - -# InfluxDB likes to have time-stamps in nanoseconds -def _getNSTime(time): - # Convert to nano-seconds - timestamp = int(1000000000*time) - #print("timestamp", timestamp) - return timestamp - -# DEPRICATED -# ____________________________________________________________________________ - -# DEPRICATED: old structure, not part of new spec -def _generateClientRequest(cReq, id, time): - # Tags first - result = 'sid="' + str(id) + '",' + cReq - - # Fields - # No additional fields here yet - - # Timestamp - result += ' ' + str(_getNSTime(time)) - - # Measurement - return 'request,' + result - - -# Method to create a full InfluxDB response statement -# DEPRECATED: old structure, not part of new spec -def _generateServerResponse(reqID, quality, time, cpuUsage, qualityDifference): - # Tags first - result = ' ' - - # Fields - result += 'quality=' + str(quality) + ',' - result += 'cpuUsage=' + str(cpuUsage) + ',' - result += 'qualityDifference=' + str(qualityDifference) + ',' - result += 'requestID="' + str(reqID) + '",' - result += 'index="' + str(uuid.uuid4()) + '"' - - # Timestamp - result += ' ' + str(_getNSTime(time)) - - # Measurement - # print('response'+result) - return 'response' + result - - - -# Formats server config -def _generateServerConfig(ID, location, cpu, mem, storage, time): - # metric - result = 'host_resource' - # Tags - result += ',slice_id=' + quote_wrap(ID) - result += ',location=' + quote_wrap(location) - result += ' ' - # Fields - result += 'cpu=' + str(cpu) - result += ',memory=' + quote_wrap(mem) - result += ',storage=' + quote_wrap(storage) - - # Time - result += ' ' + str(_getNSTime(time)) - - print(result) - return result - - - -# Format port config -def _configure_port(port_id, state, rate, time): - # metric - result = 'net_port_config ' - # Fields - result += 'port_id=' + quote_wrap('enps' + port_id) - result += ',port_state=' + quote_wrap(state) - result += ',tx_constraint=' + quote_wrap(rate) - result += ' ' - - # Time - result += ' ' + str(_getNSTime(time)) - - print(result) - return result - - -# Format service function config -def _configure_service_function(state, max_connected_clients): - # measurement - result = 'mpegdash_service_config' - # tags - result += ',service_state='+quote_wrap(state) - result += ' ' - # fields - result += 'max_connected_clients='+str(max_connected_clients) - - return result - - - -# Reports memory usage, scaling on requests -def generate_mem_report(requests, total_mem, time): - # Measurement - result = 'mem' - result += ' ' - # field - used = randint(0, min(100,5*requests)) - available = 100-used - result += 'available_percent='+str(available) - result += ',used_percent='+str(used) - result += ',total='+str(total_mem) - result += ' ' - # Time - result += str(_getNSTime(time)) - print(result) - return result - - -# 
Formats compute node config -def generate_compute_node_config(slice_id, location, node_id, cpus, mem, storage, time): - # Measurement - result = 'compute_node_config' - # CommonContext Tag - result += ',slice_id='+quote_wrap(slice_id) - # Tag - result += ',location='+quote_wrap(location) - result += ',comp_node_id='+quote_wrap(node_id) - result += ' ' - # field - result += 'cpus='+str(cpus) - result += ',memory='+str(mem) - result += ',storage='+str(storage) - result += ' ' - # Time - result += str(_getNSTime(time)) - print(result) - return result - - -# Formats network resource config -def generate_network_resource_config(slice_id, network_id, bandwidth, time): - # Measurement - result = 'network_resource_config' - # Meta Tag - result += ',slice_id='+quote_wrap(slice_id) - # Tag - result += ',network_id='+quote_wrap(network_id) - result += ' ' - # field - result += 'bandwidth='+str(bandwidth) - result += ' ' - # Time - result += str(_getNSTime(time)) - print(result) - return result - - -# Formats network interface config -def generate_network_interface_config(slice_id, comp_node_id, port_id, rx_constraint, tx_constraint, time): - # Measurement - result = 'network_interface_config' - # Meta Tag - result += ',slice_id='+quote_wrap(slice_id) - # Tags - result += ',comp_node_id='+quote_wrap(comp_node_id) - result += ',port_id='+quote_wrap(port_id) - result += ' ' - # field - result += 'rx_constraint='+str(rx_constraint) - result += ',tx_constraint='+str(tx_constraint) - result += ' ' - # Time - result += str(_getNSTime(time)) - print(result) - return result - - -# Format SF instance config -def generate_sf_instance_surrogate_config(loc, sfc, sfc_i, sf_package, sf_i, cpus, mem, storage, time): - # Measurement - result = 'sf_instance_surrogate_config' - # Meta Tag - result += ',location='+quote_wrap(loc) - result += ',sfc='+quote_wrap(sfc) - result += ',sfc_i='+quote_wrap(sfc_i) - result += ',sf_package='+quote_wrap(sf_package) - result += ',sf_i='+quote_wrap(sf_i) - result += ' ' - # field - result += 'cpus='+str(cpus) - result += ',memory='+str(mem) - result += ',storage='+str(storage) - result += ' ' - # Time - result += str(_getNSTime(time)) - print(result) - return result - - -# Formats context container as part of other line protocol generators -def service_function_measurement(measurement, service_function_context): - result = measurement - result += ',sfc='+quote_wrap(service_function_context.sfc) - result += ',sfc_i='+quote_wrap(service_function_context.sfc_i) - result += ',sf_package='+quote_wrap(service_function_context.sf_package) - result += ',sf_i='+quote_wrap(service_function_context.sf_i) - - return result - - - diff --git a/src/mediaServiceSim/serviceSim.py b/src/mediaServiceSim/serviceSim.py deleted file mode 100644 index 2cdc993af6c0d0b543abbc7b5cd396a3a786fd8e..0000000000000000000000000000000000000000 --- a/src/mediaServiceSim/serviceSim.py +++ /dev/null @@ -1,437 +0,0 @@ -# coding: utf-8 -## /////////////////////////////////////////////////////////////////////// -## -## © University of Southampton IT Innovation Centre, 2018 -## -## Copyright in this software belongs to University of Southampton -## IT Innovation Centre of Gamma House, Enterprise Road, -## Chilworth Science Park, Southampton, SO16 7NS, UK. 
-## -## This software may not be used, sold, licensed, transferred, copied -## or reproduced in whole or in part in any manner or form or in or -## on any media by any person other than in accordance with the terms -## of the Licence Agreement supplied with the software, or otherwise -## without the prior written consent of the copyright owners. -## -## This software is distributed WITHOUT ANY WARRANTY, without even the -## implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -## PURPOSE, except where stated in the Licence Agreement supplied with -## the software. -## -## Created By : Simon Crowle -## Created Date : 03-01-2018 -## Created for Project : FLAME -## -##/////////////////////////////////////////////////////////////////////// - -from random import random, randint - -import math -import time -import datetime -import uuid -import urllib.parse -import urllib.request -import LineProtocolGenerator as lp - - -# DemoConfig is a configuration class used to set up the simulation -class DemoConfig(object): - def __init__(self): - self.LOG_DATA = False # Log data sent to INFLUX if true - self.ITERATION_STRIDE = 10 # Number of seconds of requests/responses sent to INFLUXDB per HTTP POST - self.SEG_LENGTH = 4 # Each MPEG segment encodes 5 seconds worth of frames (assume double-buffering) - self.MAX_SEG = (30 * 60) / (self.SEG_LENGTH + 1) # 30 mins - self.MIN_QUALITY = 5 # Minimum quality requested by a client - self.MAX_QUALITY = 9 # Maximum quality requested by a client - self.MIN_SERV_RESP_TIME = 100 # Minimum time taken for server to respond to a request (ms) - self.CLIENT_START_DELAY_MAX = 360 # Randomly delay clients starting stream up to 3 minutes - - -dc = DemoConfig() - - -# DemoClient is a class that simulates the behaviour of a single client requesting video from the server -class DemoClient(object): - def __init__(self): - self.startRequestOffset = randint(0, - dc.CLIENT_START_DELAY_MAX) # Random time offset before requesting 1st segment - self.numSegRequests = dc.MAX_SEG - randint(0, 50) # Randomly stop client watching all of video - self.id = uuid.uuid4() # Client's ID - self.currSeg = 1 # Client's current segment - self.nextSegCountDown = 0 # Count-down before asking for next segment - self.qualityReq = randint(dc.MIN_QUALITY, dc.MAX_QUALITY) # Randomly assigned quality for this client - self.lastReqID = None # ID used to track last request made by this client - - def getQuality(self): - return self.qualityReq - - def getLastRequestID(self): - return self.lastReqID - - def iterateRequest(self): - result = None - - # If the time offset before asking for 1st segment is through and there are more segments to get - # and it is time to get one, then create a request for one! 
- if (self.startRequestOffset == 0): - if (self.numSegRequests > 0): - if (self.nextSegCountDown == 0): - - # Generate a request ID - self.lastReqID = uuid.uuid4() - - # Start building the InfluxDB statement - # tags first - result = 'cid="' + str(self.id) + '",' - result += 'segment=' + str(self.currSeg) + ' ' - - # then fields - result += 'quality=' + str(self.qualityReq) + ',' - result += 'index="' + str(self.lastReqID) + '"' - - # Update this client's segment tracking - self.currSeg += 1 - self.numSegRequests -= 1 - self.nextSegCountDown = dc.SEG_LENGTH - else: - self.nextSegCountDown -= 1 - else: - self.startRequestOffset -= 1 - - # Return the _partial_ InfluxDB statement (server will complete the rest) - return result - - -# Used to tell influx to launch or teardown a database (DB name overwritten by telegraf) -class DatabaseManager: - def __init__(self, influx_url, db_name): - self.influx_url = influx_url - self.influx_db = db_name - - def database_up(self): - self._createDB() - - def database_teardown(self): - self._deleteDB() - - def _createDB(self): - self._sendInfluxQuery('CREATE DATABASE ' + self.influx_db) - - def _deleteDB(self): - self._sendInfluxQuery('DROP DATABASE ' + self.influx_db) - - def _sendInfluxQuery(self, query): - query = urllib.parse.urlencode({'q': query}) - query = query.encode('ascii') - req = urllib.request.Request(self.influx_url + '/query ', query) - urllib.request.urlopen(req) - - -# Used to allocate clients to servers -class ClientManager: - def __init__(self, servers): - self.servers = servers - def generate_new_clients(self, amount): - assigned_count = 0 - while(assigned_count < amount): - for server in self.servers: - if(assigned_count < amount): - server.assign_client(DemoClient()) - assigned_count += 1 - - -# Simulates nodes not connected directly to clients (e.g. 
telegraf) -class Node: - def __init__(self, influxurl, influxdb, input_cpu): - self.influx_url = influxurl - self.influx_db = influxdb - self.report_cpu = input_cpu - def iterateService(self): - if self.report_cpu: - self._sendInfluxData(lp.generate_CPU_report(0)) - self._sendInfluxData(lp.generate_mem_report(10, 0)) - - # Private Methods - # ________________________________________________________________ - - # This is duplicated from DemoServer, should probably be refactored - def _sendInfluxData(self, data): - data = data.encode() - header = {'Content-Type': 'application/octet-stream'} - req = urllib.request.Request(self.influx_url + '/write?db=' + self.influx_db, data, header) - urllib.request.urlopen(req) - -# Container for common SF tags, used as part of generating SF usage reports - - -# DemoServer is the class that simulates the behaviour of the MPEG-DASH server -class DemoServer(object): - def __init__(self, si, db_url, db_name, server_id, server_location): - self.influxDB = db_name # InfluxDB database name - self.id = uuid.uuid4() # MPEG-DASH server ID - self.simIterations = si # Number of iterations to make for this simulation - self.influxURL = db_url # InfluxDB connection URL - self.currentTime = int(round(time.time() * 1000)) # The current time - self._configure(server_id, server_location) - self.clients = [] - - def shutdown(self): - print("Shutting down") - self.configure_VM('stopping') - - def assign_client(self, new_client): - self.clients.append(new_client) - print('Number of clients: ' + str(len(self.clients))) - - def configure_server(self, server_id, server_location): - print("Configuring Servers") - server_conf_block = [] - server_conf_block.append(lp._generateServerConfig(server_id, server_location, 8, '100G', '1T', - self._selectDelay(0))) - - #ids = ['A', 'B', 'C'] - #locations = ['locA', 'locB', 'locC'] - #for i, id in enumerate(ids): - # server_conf_block.append( - # lp._generateServerConfig(id, locations[i], 8, '100G', '1T', self._selectDelay(len(ids)))) - self._sendInfluxDataBlock(server_conf_block) - - def configure_VM(self, state): - print("Configuring VM node") - self._sendInfluxData(self._generateVM(state, 1)) - - def configure_ports(self): - print("Configuring Servers") - server_conf_block = [] - for i in range(0, 10): - server_conf_block.append(lp._configure_port()) - self._sendInfluxDataBlock(server_conf_block) - - def shutdown_VM(self): - print("Shutting down VM nodes") - VM_conf_block = [] - self._generateVMS('stopping', 10, VM_conf_block) - - self._sendInfluxDataBlock(VM_conf_block) - - def iterateService(self): - # The simulation will run through 'X' iterations of the simulation - # each time this method is called. 
This allows request/response messages to be - # batched and sent to the InfluxDB in sensible sized blocks - return self._executeServiceIteration(dc.ITERATION_STRIDE) - - def _executeServiceIteration(self, count): - - requestBlock = [] - responseBlock = [] - networkBlock = [] - SFBlock = [] - totalDifference = sumOfclientQuality = percentageDifference = 0 - - # Keep going until this stride (count) completes - while (count > 0): - count -= 1 - - # Check we have some iterations to do - if (self.simIterations > 0): - # First record clients that request segments - clientsRequesting = [] - - # Run through all clients and see if they make a request - for client in self.clients: - - # Record request, if it was generated - cReq = client.iterateRequest() - if cReq is not None: - clientsRequesting.append(client) - requestBlock.append(lp._generateClientRequest(cReq, self.id, self.currentTime)) - - # Now generate request statistics - clientReqCount = len(clientsRequesting) - - # Create a single CPU usage metric for this iteration - cpuUsagePercentage = self._cpuUsage(clientReqCount) - - # Now generate responses, based on stats - for client in clientsRequesting: - # Generate some quality and delays based on the number of clients requesting for this iteration - qualitySelect = self._selectQuality(client.getQuality(), clientReqCount) - delaySelect = self._selectDelay(clientReqCount) + self.currentTime - qualityDifference = client.getQuality() - qualitySelect - totalDifference += qualityDifference - # print('totalDifference = ' + str(totalDifference) +'\n') - sumOfclientQuality += client.getQuality() - # print('sumOfclientQuality = ' + str(sumOfclientQuality) + '\n') - percentageDifference = int((totalDifference * 100) / sumOfclientQuality) - # print('percentageOfQualityDifference = ' + str(percentageDifference) + '%') - - responseBlock.append(lp._generateServerResponse(client.getLastRequestID(), qualitySelect, - delaySelect, cpuUsagePercentage, - percentageDifference)) - SFBlock.append(lp._generateMpegDashReport('https://netflix.com/scream', qualitySelect, delaySelect)) - - networkBlock.append(lp._generateNetworkReport(sumOfclientQuality, delaySelect)) - # Iterate the service simulation - self.simIterations -= 1 - self.currentTime += 1000 # advance 1 second - - # If we have some requests/responses to send to InfluxDB, do it - if (len(requestBlock) > 0 and len(responseBlock) > 0): - self._sendInfluxDataBlock(requestBlock) - self._sendInfluxDataBlock(responseBlock) - self._sendInfluxDataBlock(networkBlock) - self._sendInfluxDataBlock(SFBlock) - print("Sending influx data blocks") - - return self.simIterations - - def _generateVM(self, state, delay): - return lp._generateVMConfig(state, 1, '100G', '1T', self._selectDelay(delay)) - - # 'Private' methods ________________________________________________________ - def _configure(self, server_id, server_location): - print("Configuring") - self.configure_VM('starting') - self.configure_VM('running') - #time.sleep(0.1) - self.configure_server(server_id, server_location) - self._sendInfluxData(lp._configure_port('01', 'running', '1GB/s', self.currentTime)) - self._sendInfluxData(lp._configure_service_function('starting', 100)) - #time.sleep(0.1) - self._sendInfluxData(lp._configure_service_function('running', 100)) - - def _cpuUsage(self, clientCount): - cpuUsage = randint(0, 10) - - if (clientCount < 20): - cpuUsage += 5 - elif (clientCount >= 20 and clientCount < 40): - cpuUsage += 10 - elif (clientCount >= 40 and clientCount < 60): - cpuUsage += 15 - elif 
(clientCount >= 60 and clientCount < 80): - cpuUsage += 20 - elif (clientCount >= 80 and clientCount < 110): - cpuUsage += 30 - elif (clientCount >= 110 and clientCount < 150): - cpuUsage += 40 - elif (clientCount >= 150 and clientCount < 200): - cpuUsage += 55 - elif (clientCount >= 200 and clientCount < 300): - cpuUsage += 70 - elif (clientCount >= 300): - cpuUsage += 90 - - return cpuUsage - - # Rule to determine a response quality, based on the current number of clients requesting - def _selectQuality(self, expectedQuality, clientCount): - - result = dc.MAX_QUALITY - - if (clientCount < 50): - result = 8 - elif (clientCount >= 50 and clientCount < 100): - result = 7 - elif (clientCount >= 100 and clientCount < 150): - result = 6 - elif (clientCount >= 150 and clientCount < 200): - result = 5 - elif (clientCount >= 200 and clientCount < 250): - result = 4 - elif (clientCount >= 250 and clientCount < 300): - result = 3 - elif (clientCount >= 300): - result = 2 - - # Give the client what it wants if possible - if (result > expectedQuality): - result = expectedQuality - - return result - - # Rule to determine a delay, based on the current number of clients requesting - def _selectDelay(self, cCount): - - result = dc.MIN_SERV_RESP_TIME - - if (cCount < 50): - result = 150 - elif (cCount >= 50 and cCount < 100): - result = 200 - elif (cCount > 100 and cCount < 200): - result = 500 - elif (cCount >= 200): - result = 1000 - - # Perturb the delay a bit - result += randint(0, 20) - - return result - - # InfluxDB data send methods - # ----------------------------------------------------------------------------------------------- - - def _sendInfluxData(self, data): - data = data.encode() - header = {'Content-Type': 'application/octet-stream'} - req = urllib.request.Request(self.influxURL + '/write?db=' + self.influxDB, data, header) - urllib.request.urlopen(req) - - def _sendInfluxDataBlock(self, dataBlock): - msg = '' - for stmt in dataBlock: - msg += stmt + '\n' - - try: - if (dc.LOG_DATA == True): - print(msg) - - self._sendInfluxData(msg) - - except urllib.error.HTTPError as ex: - print("Error calling: " + str(ex.url) + "..." 
-
-
-# Entry point
-# -----------------------------------------------------------------------------------------------
-print("Preparing simulation")
-
-# Iterations is time in seconds for each server to simulate
-iterations = 3000
-
-# port 8086: direct to the specified DB
-# port 8186: to Telegraf, which specifies the DB
-start_time = time.localtime()
-database_manager = DatabaseManager('http://localhost:8186', 'testDB')
-
-# Set up InfluxDB (need to wait a little while)
-database_manager.database_teardown()
-time.sleep(2)
-database_manager.database_up()
-time.sleep(2)
-
-# Configure servers
-demoServer_southampton = DemoServer(iterations, 'http://localhost:8186', 'testDB', "Server1", "Southampton")
-demoServer_bristol = DemoServer(iterations, 'http://localhost:8186', 'testDB', "Server2", "Bristol")
-telegraf_node = Node('http://localhost:8186', 'testDB', True)
-server_list = [demoServer_southampton, demoServer_bristol]
-client_manager = ClientManager(server_list)
-client_manager.generate_new_clients(20)
-
-# Start simulation
-print("Starting simulation")
-while True:
-    for server in server_list:
-        itCount = server.iterateService()
-        telegraf_node.iterateService()
-        pcDone = round((itCount / iterations) * 100)
-        print("Simulation remaining (%): " + str(pcDone) + " \r", end='')
-
-    if itCount == 0:
-        break
-
-for server in server_list:
-    server.shutdown()
-
-print("\nFinished")
-end_time = time.localtime()
-# struct_time values cannot be subtracted directly; convert via mktime for the duration
-print("Started at {0}, ended at {1}, total run time {2:.0f}s".format(
-    time.asctime(start_time), time.asctime(end_time),
-    time.mktime(end_time) - time.mktime(start_time)))
-
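
The fixed two-second sleeps around the teardown/up calls assume InfluxDB settles quickly enough; polling for readiness is more robust. InfluxDB 1.x answers GET /ping with 204 once it accepts requests (when writing through Telegraf, the poll should target the database itself rather than the agent). A minimal sketch, with arbitrary retry limits:

    import time
    import urllib.error
    import urllib.request

    def wait_for_influx(url, attempts=20, pause=0.5):
        # Poll /ping until InfluxDB responds, instead of sleeping blindly
        for _ in range(attempts):
            try:
                if urllib.request.urlopen(url + '/ping').status in (200, 204):
                    return True
            except (urllib.error.URLError, ConnectionError):
                pass
            time.sleep(pause)
        return False
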
diff --git a/src/mediaServiceSim/simulator_v2.py b/src/mediaServiceSim/simulator_v2.py
deleted file mode 100644
index 0182e75dc99b9e9f28ffad87a0d4d40e5929d67b..0000000000000000000000000000000000000000
--- a/src/mediaServiceSim/simulator_v2.py
+++ /dev/null
@@ -1,203 +0,0 @@
-import LineProtocolGenerator as lp
-import time
-import urllib.parse
-import urllib.request
-import random
-
-# Simulation parameters
-TICK_TIME = 1
-DEFAULT_REQUEST_RATE_INC = 1
-DEFAULT_REQUEST_RATE_INC_PERIOD = 10
-SIMULATION_TIME_SEC = 60 * 60
-
-# CLMC parameters
-INFLUX_DB_URL = 'http://192.168.50.10:8086'
-AGENT_URL1 = 'http://192.168.50.11:8186'
-AGENT_URL2 = 'http://192.168.50.12:8186'
-
-
-# Simulator for services
-class sim:
-    def __init__(self, influx_url):
-        # The database name is fixed: the agents report into CLMCMetrics
-        self.influx_db = 'CLMCMetrics'
-        self.influx_url = influx_url
-        # Tear down the DB from the previous sim and bring it back up
-        self._deleteDB()
-        self._createDB()
-
-    def run(self, simulation_length_seconds):
-        # Backdate the start so the generated points land in the recent past
-        start_time = time.time() - SIMULATION_TIME_SEC
-        sim_time = start_time
-
-        # segment_size: the length of video requested at a time (seconds)
-        # video_bit_rate: MPEG-2 High 1080p 25fps = 80 Mbps
-        ip_endpoints = [{'agent_url': AGENT_URL1, 'location': 'DC1', 'cpu': 16,
-                         'mem': '8GB', 'storage': '1TB', 'request_queue': 0, 'request_arrival_rate': 0,
-                         'segment_size': 2, 'video_bit_rate': 80, 'packet_size': 1500},
-                        {'agent_url': AGENT_URL2, 'location': 'DC2', 'cpu': 4,
-                         'mem': '8GB', 'storage': '1TB', 'request_queue': 0, 'request_arrival_rate': 0,
-                         'segment_size': 2, 'video_bit_rate': 80, 'packet_size': 1500}
-                        ]
-
-        # Simulate configuration of the ipendpoints
-        # endpoint state -> [mu, sigma] of a normal distribution, in seconds
-        config_delay_dist = {"placing": [10, 0.68], "booting": [10, 0.68], "connecting": [10, 0.68]}
-
-        # Place endpoints
-        max_delay = 0
-        for ip_endpoint in ip_endpoints:
-            delay_time = self._changeVMState(sim_time, ip_endpoint, config_delay_dist['placing'][0],
-                                             config_delay_dist['placing'][0] * config_delay_dist['placing'][1],
-                                             'placing', 'placed')
-            if delay_time > max_delay:
-                max_delay = delay_time
-        sim_time += max_delay
-
-        # Boot endpoints
-        max_delay = 0
-        for ip_endpoint in ip_endpoints:
-            delay_time = self._changeVMState(sim_time, ip_endpoint, config_delay_dist['booting'][0],
-                                             config_delay_dist['booting'][0] * config_delay_dist['booting'][1],
-                                             'booting', 'booted')
-            if delay_time > max_delay:
-                max_delay = delay_time
-        sim_time += max_delay
-
-        # Connect endpoints
-        max_delay = 0
-        for ip_endpoint in ip_endpoints:
-            delay_time = self._changeVMState(sim_time, ip_endpoint, config_delay_dist['connecting'][0],
-                                             config_delay_dist['connecting'][0] * config_delay_dist['connecting'][1],
-                                             'connecting', 'connected')
-            if delay_time > max_delay:
-                max_delay = delay_time
-        sim_time += max_delay
-
-        request_arrival_rate_inc = DEFAULT_REQUEST_RATE_INC
-        inc_period_count = 0
-        for i in range(simulation_length_seconds):
-            for ip_endpoint in ip_endpoints:
-                # Linear increase of the arrival rate every DEFAULT_REQUEST_RATE_INC_PERIOD ticks
-                if inc_period_count >= DEFAULT_REQUEST_RATE_INC_PERIOD:
-                    ip_endpoint['request_arrival_rate'] += request_arrival_rate_inc
-                    inc_period_count = 0
-                else:
-                    inc_period_count += 1
-
-                # Add new requests to the queue
-                ip_endpoint['request_queue'] += ip_endpoint['request_arrival_rate']
-
-                # Time to process one second of video (ms) in the current second
-                request_processing_time = int(random.normalvariate(10, 10 * 0.68))
-                if request_processing_time <= 10:
-                    request_processing_time = 10
-                # Time depends on the length of the segments in seconds
-                request_processing_time *= ip_endpoint['segment_size']
-
-                # Amount of CPU time (ms) per tick
-                cpu_time_available = ip_endpoint['cpu'] * TICK_TIME * 1000
-                max_requests_processed = int(cpu_time_available / request_processing_time)
-                # Calculate how many requests were processed
-                if ip_endpoint['request_queue'] <= max_requests_processed:
-                    # Processed all of the requests
-                    requests_processed = ip_endpoint['request_queue']
-                else:
-                    # Processed the maximum number of requests
-                    requests_processed = max_requests_processed
-
-                # Calculate CPU usage
-                cpu_active_time = int(requests_processed * request_processing_time)
-                cpu_idle_time = int(cpu_time_available - cpu_active_time)
-                cpu_usage = cpu_active_time / cpu_time_available
-                self._sendInfluxData(ip_endpoint['agent_url'],
-                                     lp.generate_cpu_report(cpu_usage, cpu_active_time, cpu_idle_time, sim_time))
-
-                # Calculate network usage metrics
-                bytes_rx = 2048 * requests_processed
-                bytes_tx = int(ip_endpoint['video_bit_rate'] / 8 * 1000000 * requests_processed * ip_endpoint['segment_size'])
-                self._sendInfluxData(ip_endpoint['agent_url'],
-                                     lp.generate_network_report(bytes_rx, bytes_tx, sim_time))
-
-                # Time to process all of the requests in the queue
-                peak_response_time = ip_endpoint['request_queue'] * request_processing_time / ip_endpoint['cpu']
-                # Mid-range estimate
-                avg_response_time = (peak_response_time + request_processing_time) / 2
-                self._sendInfluxData(ip_endpoint['agent_url'],
-                                     lp.generate_mpegdash_report('http://localhost/server-status?auto',
-                                                                 ip_endpoint['request_arrival_rate'],
-                                                                 avg_response_time, peak_response_time, sim_time))
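
To make the per-tick queue model concrete, here is the arithmetic for the DC1 endpoint (cpu=16, segment_size=2), with the sampled processing time pinned at its 10 ms mean; the queue length of 40 is an arbitrary example value:

    cpu = 16                                  # cores at DC1
    cpu_time_available = cpu * 1 * 1000       # 16 000 ms of CPU time per 1 s tick

    request_processing_time = 10 * 2          # ~10 ms per second of video x 2 s segments = 20 ms
    max_requests_processed = cpu_time_available // request_processing_time  # 800 requests/tick

    # with, say, 40 requests queued, everything is served this tick:
    requests_processed = min(40, max_requests_processed)             # 40
    cpu_active_time = requests_processed * request_processing_time   # 800 ms
    cpu_usage = cpu_active_time / cpu_time_available                 # 0.05 -> 5% busy

So the 16-core endpoint only saturates once more than ~800 requests arrive per tick, which the linearly growing arrival rate eventually guarantees.
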
-                # Needs to be calculated properly; fixed at 5 ms for now
-                network_request_delay = 0.005
-
-                # Calculate network response delay (2 km link, 100 Mbps)
-                network_response_delay = self._calcNetworkDelay(2000, 100, ip_endpoint['packet_size'],
-                                                                ip_endpoint['video_bit_rate'],
-                                                                ip_endpoint['segment_size'])
-
-                e2e_delay = network_request_delay + (avg_response_time / 1000) + network_response_delay
-
-                self._sendInfluxData(ip_endpoint['agent_url'],
-                                     lp.generate_ipendpoint_route('http://localhost/server-status?auto',
-                                                                  ip_endpoint['request_arrival_rate'], e2e_delay, sim_time))
-
-                # Remove processed requests from the queue
-                ip_endpoint['request_queue'] -= int(requests_processed)
-
-            sim_time += TICK_TIME
-
-        end_time = sim_time
-        print("Simulation Finished. Start time {0}. End time {1}. Total time {2}".format(start_time, end_time, end_time - start_time))
-
-    # distance: metres
-    # bandwidth: Mbps
-    # packet_size: bytes
-    # tx_video_bit_rate: Mbps
-    # segment_size: seconds
-    def _calcNetworkDelay(self, distance, bandwidth, packet_size, tx_video_bit_rate, segment_size):
-        # Propagation delay = distance/speed (e.g. 2000 metres at 2*10^8 m/s in optical fibre)
-        propagation_delay = distance / (2 * 100000000)
-        # Packetisation delay = IP packet size (bits) / tx rate (e.g. 100 Mbps with 0% packet loss)
-        packetisation_delay = (packet_size * 8) / (bandwidth * 1000000)
-        # Total number of packets to be sent
-        packets = (tx_video_bit_rate * 1000000) / (packet_size * 8)
-        response_delay = packets * (propagation_delay + packetisation_delay)
-
-        return response_delay
-
-    def _changeVMState(self, sim_time, ip_endpoint, mu, sigma, transition_state, next_state):
-        self._sendInfluxData(ip_endpoint['agent_url'],
-                             lp.generate_vm_config(transition_state, ip_endpoint['cpu'], ip_endpoint['mem'],
-                                                   ip_endpoint['storage'], sim_time))
-
-        delay_time = random.normalvariate(mu, sigma)
-
-        self._sendInfluxData(ip_endpoint['agent_url'],
-                             lp.generate_vm_config(next_state, ip_endpoint['cpu'], ip_endpoint['mem'],
-                                                   ip_endpoint['storage'], sim_time + delay_time))
-
-        return delay_time
-
-    def _createDB(self):
-        self._sendInfluxQuery(self.influx_url, 'CREATE DATABASE ' + self.influx_db)
-
-    def _deleteDB(self):
-        self._sendInfluxQuery(self.influx_url, 'DROP DATABASE ' + self.influx_db)
-
-    def _sendInfluxQuery(self, url, query):
-        query = urllib.parse.urlencode({'q': query})
-        query = query.encode('ascii')
-        req = urllib.request.Request(url + '/query', query)
-        urllib.request.urlopen(req)
-
-    def _sendInfluxData(self, url, data):
-        data = data.encode()
-        header = {'Content-Type': 'application/octet-stream'}
-        req = urllib.request.Request(url + '/write?db=' + self.influx_db, data, header)
-        urllib.request.urlopen(req)
-
-
-simulator = sim(INFLUX_DB_URL)
-simulator.run(SIMULATION_TIME_SEC)
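
Plugging the call-site arguments into _calcNetworkDelay (2 km link, 100 Mbps bandwidth, 1500-byte packets, 80 Mbps video) gives a feel for the magnitudes; note that segment_size is accepted but never used in the calculation:

    propagation_delay = 2000 / (2 * 100000000)          # 1e-05 s over 2 km of fibre
    packetisation_delay = (1500 * 8) / (100 * 1000000)  # 0.00012 s to clock out one packet
    packets = (80 * 1000000) / (1500 * 8)               # ~6666.7 packets per second of video
    response_delay = packets * (propagation_delay + packetisation_delay)  # ~0.867 s

Packetisation dominates here: it is twelve times the propagation term, so halving the link length barely moves the result, while doubling the link bandwidth nearly halves it.
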
diff --git a/ubuntu-xenial-16.04-cloudimg-console.log b/ubuntu-xenial-16.04-cloudimg-console.log
deleted file mode 100644
index 9905d844ab8e29d29f71a42ba593ecf90c2c68c5..0000000000000000000000000000000000000000
--- a/ubuntu-xenial-16.04-cloudimg-console.log
+++ /dev/null
@@ -1,714 +0,0 @@
[714 lines of captured VirtualBox/Ubuntu 16.04 boot console output removed from the repository: kernel and hardware initialisation, systemd unit start-up, cloud-init SSH host-key generation, and the final ttyS0 login prompt; the file had no newline at end.]