Commit 14deef5d authored by MJB

test

parent c80f165a
Showing 11 additions and 2404 deletions
@@ -94,10 +94,10 @@ Vagrant.configure("2") do |config|
config.vm.network "forwarded_port", guest: 9092, host: 9092
# install the CLMC service
config.vm.provision :shell, :path => 'scripts/influx/install-clmc-service.sh'
config.vm.provision :shell, :path => 'scripts/clmc-service/install-clmc-service.sh'
# start the CLMC service
config.vm.provision :shell, :path => 'scripts/influx/start-clmc-service.sh'
config.vm.provision :shell, :path => 'scripts/clmc-service/start-clmc-service.sh'
end
# Apache Server 1
config.vm.define "apache1" do |my|
@@ -113,10 +113,10 @@ Vagrant.configure("2") do |config|
# install the apache service
config.vm.provision :shell, :path => 'scripts/apache/install-apache.sh'
config.vm.provision :shell, :path => 'test/services/apache/install-apache.sh'
# Install CLMC agent 1
config.vm.provision :shell, :path => 'scripts/influx/install-clmc-agent.sh', :args => "/vagrant/scripts/apache/telegraf_apache_template.conf #{ipendpoints['apache1'][:location]} #{ipendpoints['apache1'][:sfc_id]} #{ipendpoints['apache1'][:sfc_id_instance]} #{ipendpoints['apache1'][:sf_id]} #{ipendpoints['apache1'][:sf_id_instance]} #{ipendpoints['apache1'][:ipendpoint_id]} #{ipendpoints['apache1'][:influxdb_url]} #{ipendpoints['apache1'][:database_name]}"
config.vm.provision :shell, :path => 'scripts/clmc-agent/install-clmc-agent.sh', :args => "/vagrant/test/services/apache/telegraf_apache_template.conf #{ipendpoints['apache1'][:location]} #{ipendpoints['apache1'][:sfc_id]} #{ipendpoints['apache1'][:sfc_id_instance]} #{ipendpoints['apache1'][:sf_id]} #{ipendpoints['apache1'][:sf_id_instance]} #{ipendpoints['apache1'][:ipendpoint_id]} #{ipendpoints['apache1'][:influxdb_url]} #{ipendpoints['apache1'][:database_name]}"
end
# Apache Server 2
config.vm.define "apache2" do |my|
@@ -129,13 +129,13 @@ Vagrant.configure("2") do |config|
end
# open apache port
config.vm.network "forwarded_port", guest: 80, host: 8081
config.vm.network "forwarded_port", guest: 80, host: 8082
# install the apache service
config.vm.provision :shell, :path => 'scripts/apache/install-apache.sh'
config.vm.provision :shell, :path => 'test/services/apache/install-apache.sh'
# Install CLMC agent
config.vm.provision :shell, :path => 'scripts/influx/install-clmc-agent.sh', :args => "/vagrant/scripts/apache/telegraf_apache_template.conf #{ipendpoints['apache2'][:location]} #{ipendpoints['apache2'][:sfc_id]} #{ipendpoints['apache2'][:sfc_id_instance]} #{ipendpoints['apache2'][:sf_id]} #{ipendpoints['apache2'][:sf_id_instance]} #{ipendpoints['apache2'][:ipendpoint_id]} #{ipendpoints['apache2'][:influxdb_url]} #{ipendpoints['apache2'][:database_name]}"
config.vm.provision :shell, :path => 'scripts/clmc-agent/install-clmc-agent.sh', :args => "/vagrant/test/services/apache/telegraf_apache_template.conf #{ipendpoints['apache2'][:location]} #{ipendpoints['apache2'][:sfc_id]} #{ipendpoints['apache2'][:sfc_id_instance]} #{ipendpoints['apache2'][:sf_id]} #{ipendpoints['apache2'][:sf_id_instance]} #{ipendpoints['apache2'][:ipendpoint_id]} #{ipendpoints['apache2'][:influxdb_url]} #{ipendpoints['apache2'][:database_name]}"
end
# NGINX VM
@@ -153,10 +153,10 @@ Vagrant.configure("2") do |config|
config.vm.network "forwarded_port", guest: 80, host: 8083
# install the apache service
config.vm.provision :shell, :path => 'scripts/nginx/install-nginx.sh'
config.vm.provision :shell, :path => 'test/services/nginx/install-nginx.sh'
# Install CLMC agent
config.vm.provision :shell, :path => 'scripts/influx/install-clmc-agent.sh', :args => "/vagrant/scripts/nginx/telegraf_nginx_template.conf #{ipendpoints['nginx1'][:location]} #{ipendpoints['nginx1'][:sfc_id]} #{ipendpoints['nginx1'][:sfc_id_instance]} #{ipendpoints['nginx1'][:sf_id]} #{ipendpoints['nginx1'][:sf_id_instance]} #{ipendpoints['nginx1'][:ipendpoint_id]} #{ipendpoints['nginx1'][:influxdb_url]} #{ipendpoints['nginx1'][:database_name]}"
config.vm.provision :shell, :path => 'scripts/clmc-agent/install-clmc-agent.sh', :args => "/vagrant/test/services/nginx/telegraf_nginx_template.conf #{ipendpoints['nginx1'][:location]} #{ipendpoints['nginx1'][:sfc_id]} #{ipendpoints['nginx1'][:sfc_id_instance]} #{ipendpoints['nginx1'][:sf_id]} #{ipendpoints['nginx1'][:sf_id_instance]} #{ipendpoints['nginx1'][:ipendpoint_id]} #{ipendpoints['nginx1'][:influxdb_url]} #{ipendpoints['nginx1'][:database_name]}"
end
# MONGODB VM
@@ -173,10 +173,10 @@ Vagrant.configure("2") do |config|
config.vm.network "forwarded_port", guest: 80, host: 8084
# install the apache service
config.vm.provision :shell, :path => 'scripts/mongo/install-mongo.sh'
config.vm.provision :shell, :path => 'test/services/mongo/install-mongo.sh'
# Install CLMC agent
config.vm.provision :shell, :path => 'scripts/influx/install-clmc-agent.sh', :args => "/vagrant/scripts/mongo/telegraf_mongo_template.conf #{ipendpoints['mongo1'][:location]} #{ipendpoints['mongo1'][:sfc_id]} #{ipendpoints['mongo1'][:sfc_id_instance]} #{ipendpoints['mongo1'][:sf_id]} #{ipendpoints['mongo1'][:sf_id_instance]} #{ipendpoints['mongo1'][:ipendpoint_id]} #{ipendpoints['mongo1'][:influxdb_url]} #{ipendpoints['mongo1'][:database_name]}"
config.vm.provision :shell, :path => 'scripts/clmc-agent/install-clmc-agent.sh', :args => "/vagrant/test/services/mongo/telegraf_mongo_template.conf #{ipendpoints['mongo1'][:location]} #{ipendpoints['mongo1'][:sfc_id]} #{ipendpoints['mongo1'][:sfc_id_instance]} #{ipendpoints['mongo1'][:sf_id]} #{ipendpoints['mongo1'][:sf_id_instance]} #{ipendpoints['mongo1'][:ipendpoint_id]} #{ipendpoints['mongo1'][:influxdb_url]} #{ipendpoints['mongo1'][:database_name]}"
end
#!/bin/bash
#/////////////////////////////////////////////////////////////////////////
#//
#// (c) University of Southampton IT Innovation Centre, 2017
#//
#// Copyright in this software belongs to University of Southampton
#// IT Innovation Centre of Gamma House, Enterprise Road,
#// Chilworth Science Park, Southampton, SO16 7NS, UK.
#//
#// This software may not be used, sold, licensed, transferred, copied
#// or reproduced in whole or in part in any manner or form or in or
#// on any media by any person other than in accordance with the terms
#// of the Licence Agreement supplied with the software, or otherwise
#// without the prior written consent of the copyright owners.
#//
#// This software is distributed WITHOUT ANY WARRANTY, without even the
#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
#// PURPOSE, except where stated in the Licence Agreement supplied with
#// the software.
#//
#// Created By : Michael Boniface
#// Created Date : 23/01/2018
#// Created for Project : FLAME
#//
#/////////////////////////////////////////////////////////////////////////
# Install apache
sudo apt-get update
sudo apt-get -y install apache2
\ No newline at end of file
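The Telegraf apache input further down scrapes mod_status at http://localhost/server-status?auto, which Ubuntu's apache2 package typically serves to local requests out of the box. A minimal Python sketch (illustrative, not part of the repository) to confirm the page is reachable after this script runs:

# Sketch: fetch the mod_status page that the Telegraf apache input will poll.
import urllib.request

with urllib.request.urlopen('http://localhost/server-status?auto') as resp:
    print(resp.read().decode()[:200])   # first few "key: value" status lines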
# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
# Global tags can be specified here in key="value" format.
[global_tags]
# location of the data centre
location="{{LOCATION}}"
# media service template id
sfc="{{SFC_ID}}"
# media service instance
sfc_i="{{SFC_ID_INSTANCE}}"
# service function type
sf="{{SF_ID}}"
# service function instance id
sf_i="{{SF_ID_INSTANCE}}"
# ipendpoint id aka surrogate instance
ipendpoint="{{IP_ENDPOINT_ID}}"
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will cache metric_buffer_limit metrics for each output, and will
## flush this buffer on a successful write.
metric_buffer_limit = 1000
## Flush the buffer whenever full, regardless of flush_interval.
flush_buffer_when_full = true
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. You shouldn't set this below
## interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Logging configuration:
## Run telegraf in debug mode
debug = false
## Run telegraf in quiet mode
quiet = false
## Specify the log file name. The empty string means to log to stdout.
logfile = "G:/Telegraf/telegraf.log"
## Override default hostname, if empty use os.Hostname()
hostname = ""
###############################################################################
# OUTPUTS #
###############################################################################
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
# Multiple urls can be specified but it is assumed that they are part of the same
# cluster, this means that only ONE of the urls will be written to each interval.
# urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
urls = ["{{INFLUXDB_URL}}"] # required
# The target database for metrics (telegraf will create it if not exists)
database = "{{DATABASE_NAME}}" # required
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
# note: using second precision greatly helps InfluxDB compression
precision = "s"
## Write timeout (for the InfluxDB client), formatted as a string.
## If not provided, will default to 5s. 0s means no timeout (not recommended).
timeout = "5s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
# Set the user agent for HTTP POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512
###############################################################################
# INPUTS #
###############################################################################
# # Influx HTTP write listener
[[inputs.http_listener]]
## Address and port to host HTTP listener on
service_address = ":8186"
## timeouts
read_timeout = "10s"
write_timeout = "10s"
## HTTPS
#tls_cert= "/etc/telegraf/cert.pem"
#tls_key = "/etc/telegraf/key.pem"
## MTLS
#tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
# Read Apache status information (mod_status)
[[inputs.apache]]
## An array of URLs to gather from, must be directed at the machine
## readable version of the mod_status page including the auto query string.
## Default is "http://localhost/server-status?auto".
urls = ["http://localhost/server-status?auto"]
## Credentials for basic HTTP authentication.
# username = "myuser"
# password = "mypassword"
## Maximum time to receive response.
# response_timeout = "5s"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
\ No newline at end of file
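The http_listener input above accepts InfluxDB line-protocol writes on port 8186, and the agent's influxdb output forwards them to the configured database. A minimal Python sketch that pushes one point through the agent; the measurement name and host are illustrative:

# Sketch: write one line-protocol point via the Telegraf http_listener on :8186.
import urllib.request

line = 'clmc_test,location=DC1 value=1'          # hypothetical measurement
req = urllib.request.Request('http://localhost:8186/write',
                             line.encode(),
                             {'Content-Type': 'application/octet-stream'})
urllib.request.urlopen(req)

The target database is taken from the [[outputs.influxdb]] section of the agent configuration, not from the client making the write.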
#!/bin/bash
#/////////////////////////////////////////////////////////////////////////
#//
#// (c) University of Southampton IT Innovation Centre, 2017
#//
#// Copyright in this software belongs to University of Southampton
#// IT Innovation Centre of Gamma House, Enterprise Road,
#// Chilworth Science Park, Southampton, SO16 7NS, UK.
#//
#// This software may not be used, sold, licensed, transferred, copied
#// or reproduced in whole or in part in any manner or form or in or
#// on any media by any person other than in accordance with the terms
#// of the Licence Agreement supplied with the software, or otherwise
#// without the prior written consent of the copyright owners.
#//
#// This software is distributed WITHOUT ANY WARRANTY, without even the
#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
#// PURPOSE, except where stated in the Licence Agreement supplied with
#// the software.
#//
#// Created By : Michael Boniface
#// Created Date : 13/12/2017
#// Created for Project : FLAME
#//
#/////////////////////////////////////////////////////////////////////////
# Install telegraf
if [ "$#" -ne 9 ]; then
echo "Error: illegal number of arguments: "$#
echo "Usage: install-clmc-agent.sh TELEGRAF_CONF_FILE LOCATION SFC_ID SFC_ID_INSTANCE SF_ID SF_ID_INSTANCE IP_ENDPOINT_ID INFLUXDB_URL DATABASE_NAME"
exit 1
fi
TELEGRAF_CONF_FILE=$1
LOCATION=$2
SFC_ID=$3
SFC_ID_INSTANCE=$4
SF_ID=$5
SF_ID_INSTANCE=$6
IP_ENDPOINT_ID=$7
INFLUXDB_URL=$8
DATABASE_NAME=$9
if [ ! -f "$TELEGRAF_CONF_FILE" ]; then
echo "Error: Telegraf conf template file not found: "$TELEGRAF_CONF_FILE
exit 1
fi
wget https://dl.influxdata.com/telegraf/releases/telegraf_1.3.2-1_amd64.deb
dpkg -i telegraf_1.3.2-1_amd64.deb
# Copy configuration
echo "Telegraf config file: " $TELEGRAF_CONF_FILE
cp $TELEGRAF_CONF_FILE /etc/telegraf/telegraf.conf
echo "INFLUXDB_URL: " $INFLUXDB_URL
echo "DATABASE_NAME: " $DATABASE_NAME
# Replace template parameters
sed -i 's/{{LOCATION}}/'$LOCATION'/g' /etc/telegraf/telegraf.conf
sed -i 's/{{SFC_ID}}/'$SFC_ID'/g' /etc/telegraf/telegraf.conf
sed -i 's/{{SFC_ID_INSTANCE}}/'$SFC_ID_INSTANCE'/g' /etc/telegraf/telegraf.conf
sed -i 's/{{SF_ID}}/'$SF_ID'/g' /etc/telegraf/telegraf.conf
sed -i 's/{{SF_ID_INSTANCE}}/'$SF_ID_INSTANCE'/g' /etc/telegraf/telegraf.conf
sed -i 's/{{IP_ENDPOINT_ID}}/'$IP_ENDPOINT_ID'/g' /etc/telegraf/telegraf.conf
sed -i 's|{{INFLUXDB_URL}}|'$INFLUXDB_URL'|g' /etc/telegraf/telegraf.conf
sed -i 's/{{DATABASE_NAME}}/'$DATABASE_NAME'/g' /etc/telegraf/telegraf.conf
# Start telegraf
systemctl start telegraf
\ No newline at end of file
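The sed commands above fill the template placeholders in place. A minimal Python sketch of the same substitution, using the template path passed in the Vagrantfile; the tag values below are illustrative only:

# Sketch: replicate the sed placeholder substitution performed by install-clmc-agent.sh.
placeholders = {
    '{{LOCATION}}': 'DC1',                               # illustrative values
    '{{SFC_ID}}': 'sfc1',
    '{{SFC_ID_INSTANCE}}': 'sfc1_instance1',
    '{{SF_ID}}': 'apache',
    '{{SF_ID_INSTANCE}}': 'apache_instance1',
    '{{IP_ENDPOINT_ID}}': 'apache1_endpoint',
    '{{INFLUXDB_URL}}': 'http://192.168.50.10:8086',
    '{{DATABASE_NAME}}': 'CLMCMetrics',
}

with open('/vagrant/test/services/apache/telegraf_apache_template.conf') as f:
    conf = f.read()
for key, value in placeholders.items():
    conf = conf.replace(key, value)
with open('/etc/telegraf/telegraf.conf', 'w') as f:      # same target as the script
    f.write(conf)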
#!/bin/bash
#/////////////////////////////////////////////////////////////////////////
#//
#// (c) University of Southampton IT Innovation Centre, 2017
#//
#// Copyright in this software belongs to University of Southampton
#// IT Innovation Centre of Gamma House, Enterprise Road,
#// Chilworth Science Park, Southampton, SO16 7NS, UK.
#//
#// This software may not be used, sold, licensed, transferred, copied
#// or reproduced in whole or in part in any manner or form or in or
#// on any media by any person other than in accordance with the terms
#// of the Licence Agreement supplied with the software, or otherwise
#// without the prior written consent of the copyright owners.
#//
#// This software is distributed WITHOUT ANY WARRANTY, without even the
#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
#// PURPOSE, except where stated in the Licence Agreement supplied with
#// the software.
#//
#// Created By : Michael Boniface
#// Created Date : 13/12/2017
#// Created for Project : FLAME
#//
#/////////////////////////////////////////////////////////////////////////
# install python for the simulator
apt-get update
apt-get -y install python
# install influx
wget https://dl.influxdata.com/influxdb/releases/influxdb_1.2.4_amd64.deb
dpkg -i influxdb_1.2.4_amd64.deb
# install kapacitor
wget https://dl.influxdata.com/kapacitor/releases/kapacitor_1.3.1_amd64.deb
dpkg -i kapacitor_1.3.1_amd64.deb
# install Chronograf
wget https://dl.influxdata.com/chronograf/releases/chronograf_1.3.3.0_amd64.deb
dpkg -i chronograf_1.3.3.0_amd64.deb
#!/bin/bash
#/////////////////////////////////////////////////////////////////////////
#//
#// (c) University of Southampton IT Innovation Centre, 2018
#//
#// Copyright in this software belongs to University of Southampton
#// IT Innovation Centre of Gamma House, Enterprise Road,
#// Chilworth Science Park, Southampton, SO16 7NS, UK.
#//
#// This software may not be used, sold, licensed, transferred, copied
#// or reproduced in whole or in part in any manner or form or in or
#// on any media by any person other than in accordance with the terms
#// of the Licence Agreement supplied with the software, or otherwise
#// without the prior written consent of the copyright owners.
#//
#// This software is distributed WITHOUT ANY WARRANTY, without even the
#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
#// PURPOSE, except where stated in the Licence Agreement supplied with
#// the software.
#//
#// Created By : Simon Crowle
#// Created Date : 03/11/2018
#// Created for Project : FLAME
#//
#/////////////////////////////////////////////////////////////////////////
echo Starting TICK stack services...
systemctl start influxdb
systemctl start kapacitor
systemctl start chronograf
\ No newline at end of file
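A quick way to confirm the stack came up is InfluxDB's /ping endpoint, which answers 204 No Content when the service is healthy. A minimal sketch, assuming the default port 8086 on the local host:

# Sketch: check that InfluxDB is answering after start-clmc-service.sh has run.
import urllib.request

resp = urllib.request.urlopen('http://localhost:8086/ping')
print('InfluxDB ping status:', resp.status)   # a healthy instance answers 204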
# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
# Global tags can be specified here in key="value" format.
[global_tags]
# location of the data centre
location="{{LOCATION}}"
# media service template id
sfc="{{SFC_ID}}"
# media service instance
sfc_i="{{SFC_ID_INSTANCE}}"
# service function type
sf="{{SF_ID}}"
# service function instance id
sf_i="{{SF_ID_INSTANCE}}"
# ipendpoint id aka surrogate instance
ipendpoint="{{IP_ENDPOINT_ID}}"
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will cache metric_buffer_limit metrics for each output, and will
## flush this buffer on a successful write.
metric_buffer_limit = 1000
## Flush the buffer whenever full, regardless of flush_interval.
flush_buffer_when_full = true
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. You shouldn't set this below
## interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Logging configuration:
## Run telegraf in debug mode
debug = false
## Run telegraf in quiet mode
quiet = false
## Specify the log file name. The empty string means to log to stdout.
logfile = "G:/Telegraf/telegraf.log"
## Override default hostname, if empty use os.Hostname()
hostname = ""
###############################################################################
# OUTPUTS #
###############################################################################
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
# Multiple urls can be specified but it is assumed that they are part of the same
# cluster, this means that only ONE of the urls will be written to each interval.
# urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
urls = ["{{INFLUXDB_URL}}"] # required
# The target database for metrics (telegraf will create it if not exists)
database = "{{DATABASE_NAME}}" # required
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
# note: using second precision greatly helps InfluxDB compression
precision = "s"
## Write timeout (for the InfluxDB client), formatted as a string.
## If not provided, will default to 5s. 0s means no timeout (not recommended).
timeout = "5s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
# Set the user agent for HTTP POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512
###############################################################################
# INPUTS #
###############################################################################
# # Influx HTTP write listener
[[inputs.http_listener]]
## Address and port to host HTTP listener on
service_address = ":8186"
## timeouts
read_timeout = "10s"
write_timeout = "10s"
## HTTPS
#tls_cert= "/etc/telegraf/cert.pem"
#tls_key = "/etc/telegraf/key.pem"
## MTLS
#tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#!/bin/bash
#/////////////////////////////////////////////////////////////////////////
#//
#// (c) University of Southampton IT Innovation Centre, 2017
#//
#// Copyright in this software belongs to University of Southampton
#// IT Innovation Centre of Gamma House, Enterprise Road,
#// Chilworth Science Park, Southampton, SO16 7NS, UK.
#//
#// This software may not be used, sold, licensed, transferred, copied
#// or reproduced in whole or in part in any manner or form or in or
#// on any media by any person other than in accordance with the terms
#// of the Licence Agreement supplied with the software, or otherwise
#// without the prior written consent of the copyright owners.
#//
#// This software is distributed WITHOUT ANY WARRANTY, without even the
#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
#// PURPOSE, except where stated in the Licence Agreement supplied with
#// the software.
#//
#// Created By : Michael Boniface
#// Created Date : 23/01/2018
#// Created for Project : FLAME
#//
#/////////////////////////////////////////////////////////////////////////
# Install mongodb
sudo apt-get update
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 2930ADAE8CAF5059EE73BB4B58712A2291FA4AD5
echo "deb [ arch=amd64,arm64 ] https://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.6 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.6.list
sudo apt-get update
sudo apt-get install -y mongodb-org
# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
# Global tags can be specified here in key="value" format.
[global_tags]
# location of the data centre
location="{{LOCATION}}"
# media service template id
sfc="{{SFC_ID}}"
# media service instance
sfc_i="{{SFC_ID_INSTANCE}}"
# service function type
sf="{{SF_ID}}"
# service function instance id
sf_i="{{SF_ID_INSTANCE}}"
# ipendpoint id aka surrogate instance
ipendpoint="{{IP_ENDPOINT_ID}}"
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will cache metric_buffer_limit metrics for each output, and will
## flush this buffer on a successful write.
metric_buffer_limit = 1000
## Flush the buffer whenever full, regardless of flush_interval.
flush_buffer_when_full = true
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. You shouldn't set this below
## interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Logging configuration:
## Run telegraf in debug mode
debug = false
## Run telegraf in quiet mode
quiet = false
## Specify the log file name. The empty string means to log to stdout.
logfile = "G:/Telegraf/telegraf.log"
## Override default hostname, if empty use os.Hostname()
hostname = ""
###############################################################################
# OUTPUTS #
###############################################################################
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
# Multiple urls can be specified but it is assumed that they are part of the same
# cluster, this means that only ONE of the urls will be written to each interval.
# urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
urls = ["{{INFLUXDB_URL}}"] # required
# The target database for metrics (telegraf will create it if not exists)
database = "{{DATABASE_NAME}}" # required
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
# note: using second precision greatly helps InfluxDB compression
precision = "s"
## Write timeout (for the InfluxDB client), formatted as a string.
## If not provided, will default to 5s. 0s means no timeout (not recommended).
timeout = "5s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
# Set the user agent for HTTP POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512
###############################################################################
# INPUTS #
###############################################################################
# # Influx HTTP write listener
[[inputs.http_listener]]
## Address and port to host HTTP listener on
service_address = ":8186"
## timeouts
read_timeout = "10s"
write_timeout = "10s"
## HTTPS
#tls_cert= "/etc/telegraf/cert.pem"
#tls_key = "/etc/telegraf/key.pem"
## MTLS
#tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
[[inputs.mongodb]]
## An array of URLs of the form:
## "mongodb://" [user ":" pass "@"] host [ ":" port]
## For example:
## mongodb://user:auth_key@10.10.3.30:27017,
## mongodb://10.10.3.33:18832,
servers = ["mongodb://127.0.0.1:27017"]
gather_perdb_stats = false
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
\ No newline at end of file
#!/bin/bash
#/////////////////////////////////////////////////////////////////////////
#//
#// (c) University of Southampton IT Innovation Centre, 2017
#//
#// Copyright in this software belongs to University of Southampton
#// IT Innovation Centre of Gamma House, Enterprise Road,
#// Chilworth Science Park, Southampton, SO16 7NS, UK.
#//
#// This software may not be used, sold, licensed, transferred, copied
#// or reproduced in whole or in part in any manner or form or in or
#// on any media by any person other than in accordance with the terms
#// of the Licence Agreement supplied with the software, or otherwise
#// without the prior written consent of the copyright owners.
#//
#// This software is distributed WITHOUT ANY WARRANTY, without even the
#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
#// PURPOSE, except where stated in the Licence Agreement supplied with
#// the software.
#//
#// Created By : Michael Boniface
#// Created Date : 23/01/2018
#// Created for Project : FLAME
#//
#/////////////////////////////////////////////////////////////////////////
# Install nginx
sudo apt-get update
yes Y | sudo apt-get install nginx
\ No newline at end of file
# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
# Global tags can be specified here in key="value" format.
[global_tags]
# location of the data centre
location="{{LOCATION}}"
# media service template id
sfc="{{SFC_ID}}"
# media service instance
sfc_i="{{SFC_ID_INSTANCE}}"
# service function type
sf="{{SF_ID}}"
# service function instance id
sf_i="{{SF_ID_INSTANCE}}"
# ipendpoint id aka surrogate instance
ipendpoint="{{IP_ENDPOINT_ID}}"
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will cache metric_buffer_limit metrics for each output, and will
## flush this buffer on a successful write.
metric_buffer_limit = 1000
## Flush the buffer whenever full, regardless of flush_interval.
flush_buffer_when_full = true
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. You shouldn't set this below
## interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Logging configuration:
## Run telegraf in debug mode
debug = false
## Run telegraf in quiet mode
quiet = false
## Specify the log file name. The empty string means to log to stdout.
logfile = "G:/Telegraf/telegraf.log"
## Override default hostname, if empty use os.Hostname()
hostname = ""
###############################################################################
# OUTPUTS #
###############################################################################
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
# Multiple urls can be specified but it is assumed that they are part of the same
# cluster, this means that only ONE of the urls will be written to each interval.
# urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
urls = ["{{INFLUXDB_URL}}"] # required
# The target database for metrics (telegraf will create it if not exists)
database = "{{DATABASE_NAME}}" # required
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
# note: using second precision greatly helps InfluxDB compression
precision = "s"
## Write timeout (for the InfluxDB client), formatted as a string.
## If not provided, will default to 5s. 0s means no timeout (not recommended).
timeout = "5s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
# Set the user agent for HTTP POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512
###############################################################################
# INPUTS #
###############################################################################
# # Influx HTTP write listener
[[inputs.http_listener]]
## Address and port to host HTTP listener on
service_address = ":8186"
## timeouts
read_timeout = "10s"
write_timeout = "10s"
## HTTPS
#tls_cert= "/etc/telegraf/cert.pem"
#tls_key = "/etc/telegraf/key.pem"
## MTLS
#tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
# Read Nginx's basic status information (ngx_http_stub_status_module)
[[inputs.nginx]]
## An array of Nginx stub_status URI to gather stats.
urls = ["http://localhost/server_status"]
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## HTTP response timeout (default: 5s)
response_timeout = "5s"
\ No newline at end of file
# line protocol
# Method to create a full InfluxDB request statement (based on partial statement from client)
import uuid
from random import random, randint
# Reports TX and RX, scaling on requested quality
def generate_network_report(received_bytes, sent_bytes, time):
# Measurement
result = 'net_port_io'
# Tags
result += ',port_id=enps03 '
# Fields
result += 'RX_BYTES_PORT_M=' + str(received_bytes) + ","
result += 'TX_BYTES_PORT_M=' + str(sent_bytes)
# Timestamp
result += ' ' + str(_getNSTime(time))
# Measurement
#print(result)
return result
# Formats VM config
def generate_vm_config(state, cpu, mem, storage, time):
# metric
result = 'vm_res_alloc'
# Tags
result += ',vm_state=' + quote_wrap(state)
result += ' '
# Fields
result += 'cpu=' + str(cpu)
result += ',memory=' + quote_wrap(mem)
result += ',storage=' + quote_wrap(storage)
# Time
result += ' ' + str(_getNSTime(time))
print(result)
return result
# Reports cpu usage, scaling on requests
def generate_cpu_report(cpu_usage, cpu_active_time, cpu_idle_time, time):
result = 'cpu_usage'
# Tag
result += ' '
# field
result += 'cpu_usage='+str(cpu_usage)
result += ',cpu_active_time='+str(cpu_active_time)
result += ',cpu_idle_time='+str(cpu_idle_time)
result += ' '
# Time
result += str(_getNSTime(time))
print(result)
return result
# Reports response times, scaling on number of requests
def generate_mpegdash_report(resource, requests, avg_response_time, peak_response_time, time):
# Measurement
result = 'mpegdash_service'
# Tags
result += ',cont_nav=\"' + str(resource) + "\" "
# Fields
# result += 'cont_rep=' + str(quality) + ','
result += 'requests=' + str(requests) + ','
result += 'avg_response_time=' + str(avg_response_time) + ','
result += 'peak_response_time=' + str(peak_response_time)
# Timestamp
result += ' ' + str(_getNSTime(time))
print(result)
return result
#ipendpoint_route,ipendpoint_id,cont_nav=FQDN HTTP_REQUESTS_FQDN_M, NETWORK_FQDN_LATENCY timestamp
def generate_ipendpoint_route(resource, requests, latency, time):
# Measurement
result = 'ipendpoint_route'
# Tags
result += ',cont_nav=\"' + str(resource) + "\" "
# Fields
# result += 'cont_rep=' + str(quality) + ','
result += 'http_requests_fqdn_m=' + str(requests) + ','
result += 'network_fqdn_latency=' + str(latency)
# Timestamp
result += ' ' + str(_getNSTime(time))
#print(result)
return result
# Influx needs strings to be quoted, this provides a utility interface to do this
def quote_wrap(str):
return "\"" + str + "\""
# InfluxDB likes to have time-stamps in nanoseconds
def _getNSTime(time):
# Convert to nano-seconds
timestamp = int(1000000000*time)
#print("timestamp", timestamp)
return timestamp
# DEPRECATED
# ____________________________________________________________________________
# DEPRECATED: old structure, not part of new spec
def _generateClientRequest(cReq, id, time):
# Tags first
result = 'sid="' + str(id) + '",' + cReq
# Fields
# No additional fields here yet
# Timestamp
result += ' ' + str(_getNSTime(time))
# Measurement
return 'request,' + result
# Method to create a full InfluxDB response statement
# DEPRECATED: old structure, not part of new spec
def _generateServerResponse(reqID, quality, time, cpuUsage, qualityDifference):
# Tags first
result = ' '
# Fields
result += 'quality=' + str(quality) + ','
result += 'cpuUsage=' + str(cpuUsage) + ','
result += 'qualityDifference=' + str(qualityDifference) + ','
result += 'requestID="' + str(reqID) + '",'
result += 'index="' + str(uuid.uuid4()) + '"'
# Timestamp
result += ' ' + str(_getNSTime(time))
# Measurement
# print('response'+result)
return 'response' + result
# Formats server config
def _generateServerConfig(ID, location, cpu, mem, storage, time):
# metric
result = 'host_resource'
# Tags
result += ',slice_id=' + quote_wrap(ID)
result += ',location=' + quote_wrap(location)
result += ' '
# Fields
result += 'cpu=' + str(cpu)
result += ',memory=' + quote_wrap(mem)
result += ',storage=' + quote_wrap(storage)
# Time
result += ' ' + str(_getNSTime(time))
print(result)
return result
# Format port config
def _configure_port(port_id, state, rate, time):
# metric
result = 'net_port_config '
# Fields
result += 'port_id=' + quote_wrap('enps' + port_id)
result += ',port_state=' + quote_wrap(state)
result += ',tx_constraint=' + quote_wrap(rate)
result += ' '
# Time
result += ' ' + str(_getNSTime(time))
print(result)
return result
# Format service function config
def _configure_service_function(state, max_connected_clients):
# measurement
result = 'mpegdash_service_config'
# tags
result += ',service_state='+quote_wrap(state)
result += ' '
# fields
result += 'max_connected_clients='+str(max_connected_clients)
return result
# Reports memory usage, scaling on requests
def generate_mem_report(requests, total_mem, time):
# Measurement
result = 'mem'
result += ' '
# field
used = randint(0, min(100,5*requests))
available = 100-used
result += 'available_percent='+str(available)
result += ',used_percent='+str(used)
result += ',total='+str(total_mem)
result += ' '
# Time
result += str(_getNSTime(time))
print(result)
return result
# Formats compute node config
def generate_compute_node_config(slice_id, location, node_id, cpus, mem, storage, time):
# Measurement
result = 'compute_node_config'
# CommonContext Tag
result += ',slice_id='+quote_wrap(slice_id)
# Tag
result += ',location='+quote_wrap(location)
result += ',comp_node_id='+quote_wrap(node_id)
result += ' '
# field
result += 'cpus='+str(cpus)
result += ',memory='+str(mem)
result += ',storage='+str(storage)
result += ' '
# Time
result += str(_getNSTime(time))
print(result)
return result
# Formats network resource config
def generate_network_resource_config(slice_id, network_id, bandwidth, time):
# Measurement
result = 'network_resource_config'
# Meta Tag
result += ',slice_id='+quote_wrap(slice_id)
# Tag
result += ',network_id='+quote_wrap(network_id)
result += ' '
# field
result += 'bandwidth='+str(bandwidth)
result += ' '
# Time
result += str(_getNSTime(time))
print(result)
return result
# Formats network interface config
def generate_network_interface_config(slice_id, comp_node_id, port_id, rx_constraint, tx_constraint, time):
# Measurement
result = 'network_interface_config'
# Meta Tag
result += ',slice_id='+quote_wrap(slice_id)
# Tags
result += ',comp_node_id='+quote_wrap(comp_node_id)
result += ',port_id='+quote_wrap(port_id)
result += ' '
# field
result += 'rx_constraint='+str(rx_constraint)
result += ',tx_constraint='+str(tx_constraint)
result += ' '
# Time
result += str(_getNSTime(time))
print(result)
return result
# Format SF instance config
def generate_sf_instance_surrogate_config(loc, sfc, sfc_i, sf_package, sf_i, cpus, mem, storage, time):
# Measurement
result = 'sf_instance_surrogate_config'
# Meta Tag
result += ',location='+quote_wrap(loc)
result += ',sfc='+quote_wrap(sfc)
result += ',sfc_i='+quote_wrap(sfc_i)
result += ',sf_package='+quote_wrap(sf_package)
result += ',sf_i='+quote_wrap(sf_i)
result += ' '
# field
result += 'cpus='+str(cpus)
result += ',memory='+str(mem)
result += ',storage='+str(storage)
result += ' '
# Time
result += str(_getNSTime(time))
print(result)
return result
# Formats context container as part of other line protocol generators
def service_function_measurement(measurement, service_function_context):
result = measurement
result += ',sfc='+quote_wrap(service_function_context.sfc)
result += ',sfc_i='+quote_wrap(service_function_context.sfc_i)
result += ',sf_package='+quote_wrap(service_function_context.sf_package)
result += ',sf_i='+quote_wrap(service_function_context.sf_i)
return result
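Each generator above returns one InfluxDB line-protocol statement: measurement name, comma-separated tags, a space, the fields, another space, and a nanosecond timestamp from _getNSTime. A minimal usage sketch with illustrative values:

# Sketch: generate one network I/O report line (values are illustrative).
import LineProtocolGenerator as lp

line = lp.generate_network_report(2048, 20000000, 1514764800.0)
print(line)
# net_port_io,port_id=enps03 RX_BYTES_PORT_M=2048,TX_BYTES_PORT_M=20000000 1514764800000000000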
# coding: utf-8
## ///////////////////////////////////////////////////////////////////////
##
## © University of Southampton IT Innovation Centre, 2018
##
## Copyright in this software belongs to University of Southampton
## IT Innovation Centre of Gamma House, Enterprise Road,
## Chilworth Science Park, Southampton, SO16 7NS, UK.
##
## This software may not be used, sold, licensed, transferred, copied
## or reproduced in whole or in part in any manner or form or in or
## on any media by any person other than in accordance with the terms
## of the Licence Agreement supplied with the software, or otherwise
## without the prior written consent of the copyright owners.
##
## This software is distributed WITHOUT ANY WARRANTY, without even the
## implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE, except where stated in the Licence Agreement supplied with
## the software.
##
## Created By : Simon Crowle
## Created Date : 03-01-2018
## Created for Project : FLAME
##
##///////////////////////////////////////////////////////////////////////
from random import random, randint
import math
import time
import datetime
import uuid
import urllib.parse
import urllib.request
import LineProtocolGenerator as lp
# DemoConfig is a configuration class used to set up the simulation
class DemoConfig(object):
def __init__(self):
self.LOG_DATA = False # Log data sent to INFLUX if true
self.ITERATION_STRIDE = 10 # Number of seconds of requests/responses sent to INFLUXDB per HTTP POST
self.SEG_LENGTH = 4 # Each MPEG segment encodes 5 seconds worth of frames (assume double-buffering)
self.MAX_SEG = (30 * 60) / (self.SEG_LENGTH + 1) # 30 mins
self.MIN_QUALITY = 5 # Minimum quality requested by a client
self.MAX_QUALITY = 9 # Maximum quality requested by a client
self.MIN_SERV_RESP_TIME = 100 # Minimum time taken for server to respond to a request (ms)
self.CLIENT_START_DELAY_MAX = 360 # Randomly delay clients starting stream up to 6 minutes
dc = DemoConfig()
# DemoClient is a class that simulates the behaviour of a single client requesting video from the server
class DemoClient(object):
def __init__(self):
self.startRequestOffset = randint(0,
dc.CLIENT_START_DELAY_MAX) # Random time offset before requesting 1st segment
self.numSegRequests = dc.MAX_SEG - randint(0, 50) # Randomly stop client watching all of video
self.id = uuid.uuid4() # Client's ID
self.currSeg = 1 # Client's current segment
self.nextSegCountDown = 0 # Count-down before asking for next segment
self.qualityReq = randint(dc.MIN_QUALITY, dc.MAX_QUALITY) # Randomly assigned quality for this client
self.lastReqID = None # ID used to track last request made by this client
def getQuality(self):
return self.qualityReq
def getLastRequestID(self):
return self.lastReqID
def iterateRequest(self):
result = None
# If the time offset before asking for 1st segment is through and there are more segments to get
# and it is time to get one, then create a request for one!
if (self.startRequestOffset == 0):
if (self.numSegRequests > 0):
if (self.nextSegCountDown == 0):
# Generate a request ID
self.lastReqID = uuid.uuid4()
# Start building the InfluxDB statement
# tags first
result = 'cid="' + str(self.id) + '",'
result += 'segment=' + str(self.currSeg) + ' '
# then fields
result += 'quality=' + str(self.qualityReq) + ','
result += 'index="' + str(self.lastReqID) + '"'
# Update this client's segment tracking
self.currSeg += 1
self.numSegRequests -= 1
self.nextSegCountDown = dc.SEG_LENGTH
else:
self.nextSegCountDown -= 1
else:
self.startRequestOffset -= 1
# Return the _partial_ InfluxDB statement (server will complete the rest)
return result
# Used to tell influx to launch or teardown a database (DB name overwritten by telegraf)
class DatabaseManager:
def __init__(self, influx_url, db_name):
self.influx_url = influx_url
self.influx_db = db_name
def database_up(self):
self._createDB()
def database_teardown(self):
self._deleteDB()
def _createDB(self):
self._sendInfluxQuery('CREATE DATABASE ' + self.influx_db)
def _deleteDB(self):
self._sendInfluxQuery('DROP DATABASE ' + self.influx_db)
def _sendInfluxQuery(self, query):
query = urllib.parse.urlencode({'q': query})
query = query.encode('ascii')
req = urllib.request.Request(self.influx_url + '/query', query)
urllib.request.urlopen(req)
# Used to allocate clients to servers
class ClientManager:
def __init__(self, servers):
self.servers = servers
def generate_new_clients(self, amount):
assigned_count = 0
while(assigned_count < amount):
for server in self.servers:
if(assigned_count < amount):
server.assign_client(DemoClient())
assigned_count += 1
# Simulates nodes not connected directly to clients (e.g. telegraf)
class Node:
def __init__(self, influxurl, influxdb, input_cpu):
self.influx_url = influxurl
self.influx_db = influxdb
self.report_cpu = input_cpu
def iterateService(self):
if self.report_cpu:
self._sendInfluxData(lp.generate_CPU_report(0))
self._sendInfluxData(lp.generate_mem_report(10, 0))
# Private Methods
# ________________________________________________________________
# This is duplicated from DemoServer, should probably be refactored
def _sendInfluxData(self, data):
data = data.encode()
header = {'Content-Type': 'application/octet-stream'}
req = urllib.request.Request(self.influx_url + '/write?db=' + self.influx_db, data, header)
urllib.request.urlopen(req)
# Container for common SF tags, used as part of generating SF usage reports
# DemoServer is the class that simulates the behaviour of the MPEG-DASH server
class DemoServer(object):
def __init__(self, si, db_url, db_name, server_id, server_location):
self.influxDB = db_name # InfluxDB database name
self.id = uuid.uuid4() # MPEG-DASH server ID
self.simIterations = si # Number of iterations to make for this simulation
self.influxURL = db_url # InfluxDB connection URL
self.currentTime = int(round(time.time() * 1000)) # The current time
self._configure(server_id, server_location)
self.clients = []
def shutdown(self):
print("Shutting down")
self.configure_VM('stopping')
def assign_client(self, new_client):
self.clients.append(new_client)
print('Number of clients: ' + str(len(self.clients)))
def configure_server(self, server_id, server_location):
print("Configuring Servers")
server_conf_block = []
server_conf_block.append(lp._generateServerConfig(server_id, server_location, 8, '100G', '1T',
self._selectDelay(0)))
#ids = ['A', 'B', 'C']
#locations = ['locA', 'locB', 'locC']
#for i, id in enumerate(ids):
# server_conf_block.append(
# lp._generateServerConfig(id, locations[i], 8, '100G', '1T', self._selectDelay(len(ids))))
self._sendInfluxDataBlock(server_conf_block)
def configure_VM(self, state):
print("Configuring VM node")
self._sendInfluxData(self._generateVM(state, 1))
def configure_ports(self):
print("Configuring Servers")
server_conf_block = []
for i in range(0, 10):
server_conf_block.append(lp._configure_port())
self._sendInfluxDataBlock(server_conf_block)
def shutdown_VM(self):
print("Shutting down VM nodes")
VM_conf_block = []
self._generateVMS('stopping', 10, VM_conf_block)
self._sendInfluxDataBlock(VM_conf_block)
def iterateService(self):
# The simulation will run through 'X' iterations of the simulation
# each time this method is called. This allows request/response messages to be
# batched and sent to the InfluxDB in sensible sized blocks
return self._executeServiceIteration(dc.ITERATION_STRIDE)
def _executeServiceIteration(self, count):
requestBlock = []
responseBlock = []
networkBlock = []
SFBlock = []
totalDifference = sumOfclientQuality = percentageDifference = 0
# Keep going until this stride (count) completes
while (count > 0):
count -= 1
# Check we have some iterations to do
if (self.simIterations > 0):
# First record clients that request segments
clientsRequesting = []
# Run through all clients and see if they make a request
for client in self.clients:
# Record request, if it was generated
cReq = client.iterateRequest()
if cReq is not None:
clientsRequesting.append(client)
requestBlock.append(lp._generateClientRequest(cReq, self.id, self.currentTime))
# Now generate request statistics
clientReqCount = len(clientsRequesting)
# Create a single CPU usage metric for this iteration
cpuUsagePercentage = self._cpuUsage(clientReqCount)
# Now generate responses, based on stats
for client in clientsRequesting:
# Generate some quality and delays based on the number of clients requesting for this iteration
qualitySelect = self._selectQuality(client.getQuality(), clientReqCount)
delaySelect = self._selectDelay(clientReqCount) + self.currentTime
qualityDifference = client.getQuality() - qualitySelect
totalDifference += qualityDifference
# print('totalDifference = ' + str(totalDifference) +'\n')
sumOfclientQuality += client.getQuality()
# print('sumOfclientQuality = ' + str(sumOfclientQuality) + '\n')
percentageDifference = int((totalDifference * 100) / sumOfclientQuality)
# print('percentageOfQualityDifference = ' + str(percentageDifference) + '%')
responseBlock.append(lp._generateServerResponse(client.getLastRequestID(), qualitySelect,
delaySelect, cpuUsagePercentage,
percentageDifference))
SFBlock.append(lp._generateMpegDashReport('https://netflix.com/scream', qualitySelect, delaySelect))
networkBlock.append(lp._generateNetworkReport(sumOfclientQuality, delaySelect))
# Iterate the service simulation
self.simIterations -= 1
self.currentTime += 1000 # advance 1 second
# If we have some requests/responses to send to InfluxDB, do it
if (len(requestBlock) > 0 and len(responseBlock) > 0):
self._sendInfluxDataBlock(requestBlock)
self._sendInfluxDataBlock(responseBlock)
self._sendInfluxDataBlock(networkBlock)
self._sendInfluxDataBlock(SFBlock)
print("Sending influx data blocks")
return self.simIterations
def _generateVM(self, state, delay):
return lp._generateVMConfig(state, 1, '100G', '1T', self._selectDelay(delay))
# 'Private' methods ________________________________________________________
def _configure(self, server_id, server_location):
print("Configuring")
self.configure_VM('starting')
self.configure_VM('running')
#time.sleep(0.1)
self.configure_server(server_id, server_location)
self._sendInfluxData(lp._configure_port('01', 'running', '1GB/s', self.currentTime))
self._sendInfluxData(lp._configure_service_function('starting', 100))
#time.sleep(0.1)
self._sendInfluxData(lp._configure_service_function('running', 100))
def _cpuUsage(self, clientCount):
cpuUsage = randint(0, 10)
if (clientCount < 20):
cpuUsage += 5
elif (clientCount >= 20 and clientCount < 40):
cpuUsage += 10
elif (clientCount >= 40 and clientCount < 60):
cpuUsage += 15
elif (clientCount >= 60 and clientCount < 80):
cpuUsage += 20
elif (clientCount >= 80 and clientCount < 110):
cpuUsage += 30
elif (clientCount >= 110 and clientCount < 150):
cpuUsage += 40
elif (clientCount >= 150 and clientCount < 200):
cpuUsage += 55
elif (clientCount >= 200 and clientCount < 300):
cpuUsage += 70
elif (clientCount >= 300):
cpuUsage += 90
return cpuUsage
# Rule to determine a response quality, based on the current number of clients requesting
def _selectQuality(self, expectedQuality, clientCount):
result = dc.MAX_QUALITY
if (clientCount < 50):
result = 8
elif (clientCount >= 50 and clientCount < 100):
result = 7
elif (clientCount >= 100 and clientCount < 150):
result = 6
elif (clientCount >= 150 and clientCount < 200):
result = 5
elif (clientCount >= 200 and clientCount < 250):
result = 4
elif (clientCount >= 250 and clientCount < 300):
result = 3
elif (clientCount >= 300):
result = 2
# Give the client what it wants if possible
if (result > expectedQuality):
result = expectedQuality
return result
# Rule to determine a delay, based on the current number of clients requesting
def _selectDelay(self, cCount):
result = dc.MIN_SERV_RESP_TIME
if (cCount < 50):
result = 150
elif (cCount >= 50 and cCount < 100):
result = 200
elif (cCount > 100 and cCount < 200):
result = 500
elif (cCount >= 200):
result = 1000
# Perturb the delay a bit
result += randint(0, 20)
return result
# InfluxDB data send methods
# -----------------------------------------------------------------------------------------------
def _sendInfluxData(self, data):
data = data.encode()
header = {'Content-Type': 'application/octet-stream'}
req = urllib.request.Request(self.influxURL + '/write?db=' + self.influxDB, data, header)
urllib.request.urlopen(req)
def _sendInfluxDataBlock(self, dataBlock):
msg = ''
for stmt in dataBlock:
msg += stmt + '\n'
try:
if (dc.LOG_DATA == True):
print(msg)
self._sendInfluxData(msg)
except urllib.error.HTTPError as ex:
print("Error calling: " + str(ex.url) + "..." + str(ex.msg))
# Entry point
# -----------------------------------------------------------------------------------------------
print("Preparing simulation")
# Iterations is time in seconds for each server to simulate
iterations = 3000
# port 8086: Direct to DB specified
# port 8186: To telegraf, telegraf specifies DB
start_time = time.time()
database_manager = DatabaseManager('http://localhost:8186', 'testDB')
# Set up InfluxDB (need to wait a little while)
database_manager.database_teardown()
time.sleep(2)
database_manager.database_up()
time.sleep(2)
# configure servers
demoServer_southampton = DemoServer(iterations, 'http://localhost:8186', 'testDB', "Server1", "Southampton")
demoServer_bristol = DemoServer(iterations, 'http://localhost:8186', 'testDB', "Server2", "Bristol")
telegraf_node = Node('http://localhost:8186', 'testDB', True)
server_list = [demoServer_southampton, demoServer_bristol]
client_manager = ClientManager(server_list)
client_manager.generate_new_clients(20)
# Start simulation
print("Starting simulation")
while True:
for server in server_list:
itCount = server.iterateService()
telegraf_node.iterateService()
pcDone = round((itCount / iterations) * 100)
print("Simulation remaining (%): " + str(pcDone) + " \r", end='')
if itCount == 0:
break
for server in server_list:
server.shutdown()
print("\nFinished")
end_time = time.time()
print("Started at {0} ended at {1}, total run time {2} seconds".format(start_time, end_time, end_time - start_time))
import LineProtocolGenerator as lp
import time
import urllib.parse
import urllib.request
import sys
import random
# Simulation parameters
TICK_TIME = 1
DEFAULT_REQUEST_RATE_INC = 1
DEFAULT_REQUEST_RATE_INC_PERIOD = 10
SIMULATION_TIME_SEC = 60*60
# CLMC parameters
INFLUX_DB_URL = 'http://192.168.50.10:8086'
AGENT_URL1 = 'http://192.168.50.11:8186'
AGENT_URL2 = 'http://192.168.50.12:8186'
# Simulator for services
class sim:
def __init__(self, influx_url):
# We don't need this as the db is CLMC metrics
self.influx_db = 'CLMCMetrics'
self.influx_url = influx_url
# Teardown DB from previous sim and bring it back up
self._deleteDB()
self._createDB()
def run(self, simulation_length_seconds):
start_time = time.time()-SIMULATION_TIME_SEC
sim_time = start_time
# segment_size : the length of video requested at a time
# bit_rate: MPEG-2 High 1080p 25fps = 80Mbps
ip_endpoints = [{'agent_url': AGENT_URL1, 'location': 'DC1', 'cpu': 16,
'mem': '8GB', 'storage': '1TB', 'request_queue': 0, 'request_arrival_rate': 0,
'segment_size': 2, 'video_bit_rate': 80, 'packet_size': 1500},
{'agent_url': AGENT_URL2, 'location': 'DC2', 'cpu': 4,
'mem': '8GB', 'storage': '1TB', 'request_queue': 0, 'request_arrival_rate': 0,
'segment_size': 2, 'video_bit_rate': 80, 'packet_size': 1500}
]
# Simulate configuration of the ipendpoints
# endpoint state->mu, sigma, secs normal distribution
config_delay_dist = {"placing": [10, 0.68], "booting": [10, 0.68],"connecting": [10, 0.68]}
# Place endpoints
max_delay = 0
for ip_endpoint in ip_endpoints:
delay_time = self._changeVMState(sim_time, ip_endpoint, config_delay_dist['placing'][0], config_delay_dist['placing'][0]*config_delay_dist['placing'][1], 'placing', 'placed')
if delay_time > max_delay:
max_delay = delay_time
sim_time +=max_delay
# Boot endpoints
max_delay = 0
for ip_endpoint in ip_endpoints:
delay_time = self._changeVMState(sim_time, ip_endpoint, config_delay_dist['booting'][0], config_delay_dist['booting'][0]*config_delay_dist['booting'][1], 'booting', 'booted')
if delay_time > max_delay:
max_delay = delay_time
sim_time +=max_delay
# Connect endpoints
max_delay = 0
for ip_endpoint in ip_endpoints:
delay_time = self._changeVMState(sim_time, ip_endpoint, config_delay_dist['connecting'][0], config_delay_dist['connecting'][0]*config_delay_dist['connecting'][1], 'connecting', 'connected')
if delay_time > max_delay:
max_delay = delay_time
sim_time +=max_delay
request_arrival_rate_inc = DEFAULT_REQUEST_RATE_INC
request_queue = 0
inc_period_count = 0
for i in range(simulation_length_seconds):
for ip_endpoint in ip_endpoints:
request_processing_time = 0
cpu_time_available = 0
requests_processed = 0
max_requests_processed = 0
cpu_active_time = 0
cpu_idle_time = 0
cpu_usage = 0
cpu_load_time = 0
avg_response_time = 0
peak_response_time = 0
# linear inc to arrival rate
if inc_period_count >= DEFAULT_REQUEST_RATE_INC_PERIOD:
ip_endpoint['request_arrival_rate'] += request_arrival_rate_inc
inc_period_count = 0
else:
inc_period_count += 1
# add new requests to the queue
ip_endpoint['request_queue'] += ip_endpoint['request_arrival_rate']
# time to process one second of video (mS) in the current second
request_processing_time = int(random.normalvariate(10, 10*0.68))
if request_processing_time <= 10:
request_processing_time = 10
# time depends on the length of the segments in seconds
request_processing_time *= ip_endpoint['segment_size']
# amount of cpu time (mS) per tick
cpu_time_available = ip_endpoint['cpu']*TICK_TIME*1000
max_requests_processed = int(cpu_time_available/request_processing_time)
# calc how many requests processed
if ip_endpoint['request_queue'] <= max_requests_processed:
# processed all of the requests
requests_processed = ip_endpoint['request_queue']
else:
# processed the maximum number of requests
requests_processed = max_requests_processed
# calculate cpu usage
cpu_active_time = int(requests_processed*request_processing_time)
cpu_idle_time = int(cpu_time_available-cpu_active_time)
cpu_usage = cpu_active_time/cpu_time_available
self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_cpu_report(cpu_usage, cpu_active_time, cpu_idle_time, sim_time))
# calc network usage metrics
bytes_rx = 2048*requests_processed
bytes_tx = int(ip_endpoint['video_bit_rate']/8*1000000*requests_processed*ip_endpoint['segment_size'])
self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_network_report(bytes_rx, bytes_tx, sim_time))
# time to process all of the requests in the queue
peak_response_time = ip_endpoint['request_queue']*request_processing_time/ip_endpoint['cpu']
# mid-range
avg_response_time = (peak_response_time+request_processing_time)/2
self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_mpegdash_report('http://localhost/server-status?auto', ip_endpoint['request_arrival_rate'], avg_response_time, peak_response_time, sim_time))
# need to calculate this but set at 5 ms for now
network_request_delay = 0.005
# calculate network response delays (2km link, 100Mbps)
network_response_delay = self._calcNetworkDelay(2000, 100, ip_endpoint['packet_size'], ip_endpoint['video_bit_rate'], ip_endpoint['segment_size'])
e2e_delay = network_request_delay + (avg_response_time/1000) + network_response_delay
self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_ipendpoint_route('http://localhost/server-status?auto', ip_endpoint['request_arrival_rate'], e2e_delay, sim_time))
# remove requests processed off the queue
ip_endpoint['request_queue'] -= int(requests_processed)
sim_time += TICK_TIME
end_time = sim_time
print("Simulation Finished. Start time {0}. End time {1}. Total time {2}".format(start_time,end_time,end_time-start_time))
# distance metres
# bandwidth Mbps
# packet size bytes
# tx_video_bit_rate bp/sec
# segment size sec
def _calcNetworkDelay(self, distance, bandwidth, packet_size, tx_video_bit_rate, segment_size):
response_delay = 0
# propagation delay = distance / propagation speed (e.g. 2000 metres / 2*10^8 m/s for optical fibre)
propagation_delay = distance/(2*100000000)
# packetisation delay = ip packet size (bits)/tx rate (e.g. 100Mbp with 0% packet loss)
packetisation_delay = (packet_size*8)/(bandwidth*1000000)
# print('packetisation_delay:', packetisation_delay)
# total number of packets to be sent
packets = (tx_video_bit_rate*1000000)/(packet_size*8)
# print('packets:', packets)
response_delay = packets*(propagation_delay+packetisation_delay)
# print('response_delay:', response_delay)
return response_delay
def _changeVMState(self, sim_time, ip_endpoint, mu, sigma, transition_state, next_state):
delay_time = 0
self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_vm_config(transition_state, ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], sim_time))
delay_time = random.normalvariate(mu, sigma)
self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_vm_config(next_state, ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], sim_time+delay_time))
return delay_time
def _createDB(self):
self._sendInfluxQuery(self.influx_url, 'CREATE DATABASE ' + self.influx_db)
def _deleteDB(self):
self._sendInfluxQuery(self.influx_url, 'DROP DATABASE ' + self.influx_db)
def _sendInfluxQuery(self, url, query):
query = urllib.parse.urlencode({'q': query})
query = query.encode('ascii')
req = urllib.request.Request(url + '/query', query)
urllib.request.urlopen(req)
def _sendInfluxData(self, url, data):
data = data.encode()
header = {'Content-Type': 'application/octet-stream'}
req = urllib.request.Request(url + '/write?db=' + self.influx_db, data, header)
urllib.request.urlopen(req)
simulator = sim(INFLUX_DB_URL)
simulator.run(SIMULATION_TIME_SEC)
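For reference, the response-delay model in sim._calcNetworkDelay, evaluated with the parameters the simulator passes (2 km link, 100 Mbps bandwidth, 1500-byte packets, 80 Mbps video bit rate), works out to roughly 0.87 seconds per second of video. A standalone sketch of the same arithmetic:

# Sketch: the delay calculation from _calcNetworkDelay with the simulator's parameters.
distance, bandwidth, packet_size, tx_video_bit_rate = 2000, 100, 1500, 80

propagation_delay = distance / (2 * 100000000)                   # 1e-05 s
packetisation_delay = (packet_size * 8) / (bandwidth * 1000000)  # 0.00012 s
packets = (tx_video_bit_rate * 1000000) / (packet_size * 8)      # ~6666.7 packets
response_delay = packets * (propagation_delay + packetisation_delay)
print(round(response_delay, 3))                                  # ~0.867 s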