Commit de91a198 authored by Michael Boniface

Merge branch 'fms-mon' into 'integration'

Fms mon

See merge request FLAME/flame-clmc!8
parents 2ee01f4b e52196f8
Showing 775 additions and 524 deletions
@@ -22,95 +22,83 @@
#// Created for Project : FLAME
#//
#/////////////////////////////////////////////////////////////////////////
# Define ipendpoint configuration parameters
# Requirements
require 'getoptlong'
require 'yaml'
# Custom options:
# --infra <infradir>
# Set defaults
DEFAULT_INFRA = "streaming"
# Define custom options
opts = GetoptLong.new(
[ '--infra', GetoptLong::OPTIONAL_ARGUMENT]
)
# Retrieve custom option values
infra = DEFAULT_INFRA
opts.each do |opt, arg|
case opt
when '--infra'
infra = arg
end
end
ipendpoints = {
"ipendpoint1" => {
:ip_address => "192.168.50.11",
:location => "DC1",
:sfc_id => "MS_Template_1",
:sfc_id_instance => "MS_I1",
:sf_id => "adaptive_streaming",
:sf_id_instance => "adaptive_streaming_I1",
:ipendpoint_id => "adaptive_streaming_I1_ipendpoint1",
:influxdb_url => "http://192.168.50.10:8086",
:database_name => "CLMCMetrics"
},
"ipendpoint2" => {
:ip_address => "192.168.50.12",
:location => "DC2",
:sfc_id => "MS_Template_1",
:sfc_id_instance => "MS_I1",
:sf_id => "adaptive_streaming",
:sf_id_instance => "adaptive_streaming_I1",
:ipendpoint_id => "adaptive_streaming_I1_ipendpoint2",
:influxdb_url => "http://192.168.50.10:8086",
:database_name => "CLMCMetrics"
}
}
# load custom config file
puts "loading custom infrastructure configuration: #{infra}"
puts "custom config file: /infra/#{infra}/rspec.yml"
host_rspec_file = "infra/#{infra}/rspec.yml"
hosts = YAML.load_file(host_rspec_file)
# Start creating VMs using xenial64 as the base box
Vagrant.configure("2") do |config|
config.vm.box = "ubuntu/xenial64"
config.vm.define "clmc-service" do |my|
config.vm.network :private_network, ip: "192.168.50.10", virtualbox__intnet: "clmc-net"
my.vm.provider "virtualbox" do |v|
v.customize ["modifyvm", :id, "--memory", 2048]
v.customize ["modifyvm", :id, "--cpus", 1]
# Dynamic VMs
hosts['hosts'].each do |host|
#p host["name"]
instance_name = host["name"]
config.vm.define instance_name do |instance_config|
# Specify VM properties
instance_config.vm.hostname = instance_name
instance_config.disksize.size = host["disk"]
instance_config.vm.provider "virtualbox" do |v|
v.customize ["modifyvm", :id, "--memory", host["memory"]]
v.customize ["modifyvm", :id, "--cpus", host["cpus"]]
end
# open InfluxDB port
config.vm.network "forwarded_port", guest: 8086, host: 8086
# open Chronograf port
config.vm.network "forwarded_port", guest: 8888, host: 8888
# open Kapacitor port
config.vm.network "forwarded_port", guest: 9092, host: 9092
# Configure network; note that we only expect one test to be running, so we have one internal network
config.vm.network :private_network, ip: "#{host["ip_address"]}", virtualbox__intnet: "clmc-net"
# install the CLMC service
config.vm.provision :shell, :path => 'scripts/influx/install-clmc-service.sh'
# start the CLMC service
config.vm.provision :shell, :path => 'scripts/influx/start-clmc-service.sh'
end
config.vm.define "ipendpoint1" do |my|
config.vm.network :private_network, ip: "#{ipendpoints['ipendpoint1'][:ip_address]}", virtualbox__intnet: "clmc-net"
my.vm.provider "virtualbox" do |v|
v.customize ["modifyvm", :id, "--memory", 512]
v.customize ["modifyvm", :id, "--cpus", 1]
# Port forwarding
puts "Forwarding the following specified ports for #{host["name"]}:"
host['forward_ports'].each do |port|
puts "Forwarding guest:#{port["guest"]} => host:#{port["host"]}"
config.vm.network "forwarded_port", guest: port["guest"], host: port["host"]
end
# A case statement is used here to keep clmc-service provisioning simple without requiring a more complex rspec.yml file
# The service installation script and the CLMC agent installation script are only run when creating a specific service VM, not the clmc-service VM
puts "Instance name #{instance_name}:"
case instance_name
when 'clmc-service'
config.vm.provision :shell, :path => "scripts/clmc-service/#{host["install_script"]}"
config.vm.provision :shell, :path => "scripts/clmc-service/#{host["start_script"]}"
else
# specific service install
service_install_path = "test/services/#{host["service_name"]}/install-#{host["service_name"]}.sh"
puts "installing service script: #{service_install_path}"
config.vm.provision :shell, :path => service_install_path
# agent install
config.vm.provision :shell, :path => "scripts/clmc-agent/install-clmc-agent.sh", :args => "/vagrant/test/services/#{host["service_name"]}/telegraf_#{host["service_name"]}_template.conf #{host["location"]} #{host["sfc_id"]} #{host["sfc_id_instance"]} #{host["sf_id"]} #{host["sf_id_instance"]} #{host["ipendpoint_id"]} #{host["influxdb_url"]} #{host["database_name"]}"
# open apache port
config.vm.network "forwarded_port", guest: 80, host: 8080
# install the apache service
config.vm.provision :shell, :path => 'scripts/apache/install-apache.sh'
# Install CLMC agent 1
config.vm.provision :shell, :path => 'scripts/influx/install-clmc-agent.sh', :args => "/vagrant/scripts/influx/telegraf_ipendpoint_template.conf #{ipendpoints['ipendpoint1'][:location]} #{ipendpoints['ipendpoint1'][:sfc_id]} #{ipendpoints['ipendpoint1'][:sfc_id_instance]} #{ipendpoints['ipendpoint1'][:sf_id]} #{ipendpoints['ipendpoint1'][:sf_id_instance]} #{ipendpoints['ipendpoint1'][:ipendpoint_id]} #{ipendpoints['ipendpoint1'][:influxdb_url]} #{ipendpoints['ipendpoint1'][:database_name]}"
end
config.vm.define "ipendpoint2" do |my|
config.vm.network :private_network, ip: "#{ipendpoints['ipendpoint2'][:ip_address]}", virtualbox__intnet: "clmc-net"
my.vm.provider "virtualbox" do |v|
v.customize ["modifyvm", :id, "--memory", 512]
v.customize ["modifyvm", :id, "--cpus", 1]
end
# open apache port
config.vm.network "forwarded_port", guest: 80, host: 8081
# install the apache service
config.vm.provision :shell, :path => 'scripts/apache/install-apache.sh'
# Install CLMC agent
config.vm.provision :shell, :path => 'scripts/influx/install-clmc-agent.sh', :args => "/vagrant/scripts/influx/telegraf_ipendpoint_template.conf #{ipendpoints['ipendpoint2'][:location]} #{ipendpoints['ipendpoint2'][:sfc_id]} #{ipendpoints['ipendpoint2'][:sfc_id_instance]} #{ipendpoints['ipendpoint2'][:sf_id]} #{ipendpoints['ipendpoint2'][:sf_id_instance]} #{ipendpoints['ipendpoint2'][:ipendpoint_id]} #{ipendpoints['ipendpoint2'][:influxdb_url]} #{ipendpoints['ipendpoint2'][:database_name]}"
end
end
end
hosts:
  - name: clmc-service
    cpus: 1
    memory: 2048
    disk: "10GB"
    forward_ports:
      - guest: 8086
        host: 8086
      - guest: 8888
        host: 8888
      - guest: 9092
        host: 9092
    ip_address: "192.168.50.10"
    install_script: "install-clmc-service.sh"
    start_script: "start-clmc-service.sh"
  - name: ipendpoint1
    cpus: 1
    memory: 2048
    disk: "10GB"
    service_name: "ipendpoint"
    forward_ports:
      - guest: 80
        host: 8081
    ip_address: "192.168.50.11"
    location: "DC1"
    sfc_id: "MS_Template_1"
    sfc_id_instance: "MS_I1"
    sf_id: "adaptive_streaming"
    sf_id_instance: "adaptive_streaming_I1"
    ipendpoint_id: "adaptive_streaming_I1_apache1"
    influxdb_url: "http://192.168.50.10:8086"
    database_name: "CLMCMetrics"
  - name: ipendpoint12
    cpus: 1
    memory: 2048
    disk: "10GB"
    service_name: "ipendpoint"
    forward_ports:
      - guest: 80
        host: 8082
    ip_address: "192.168.50.12"
    location: "DC2"
    sfc_id: "MS_Template_1"
    sfc_id_instance: "MS_I1"
    sf_id: "adaptive_streaming"
    sf_id_instance: "adaptive_streaming_I1"
    ipendpoint_id: "adaptive_streaming_I1_apache2"
    influxdb_url: "http://192.168.50.10:8086"
    database_name: "CLMCMetrics"
\ No newline at end of file
hosts:
  - name: clmc-service
    cpus: 1
    memory: 2048
    disk: "10GB"
    forward_ports:
      - guest: 8086
        host: 8086
      - guest: 8888
        host: 8888
      - guest: 9092
        host: 9092
    ip_address: "192.168.50.10"
    install_script: "install-clmc-service.sh"
    start_script: "start-clmc-service.sh"
  - name: apache1
    cpus: 1
    memory: 2048
    disk: "10GB"
    service_name: "apache"
    forward_ports:
      - guest: 80
        host: 8081
    ip_address: "192.168.50.11"
    location: "DC1"
    sfc_id: "MS_Template_1"
    sfc_id_instance: "MS_I1"
    sf_id: "adaptive_streaming"
    sf_id_instance: "adaptive_streaming_I1"
    ipendpoint_id: "adaptive_streaming_I1_apache1"
    influxdb_url: "http://192.168.50.10:8086"
    database_name: "CLMCMetrics"
  - name: apache2
    cpus: 1
    memory: 2048
    disk: "10GB"
    service_name: "apache"
    forward_ports:
      - guest: 80
        host: 8082
    ip_address: "192.168.50.12"
    location: "DC2"
    sfc_id: "MS_Template_1"
    sfc_id_instance: "MS_I1"
    sf_id: "adaptive_streaming"
    sf_id_instance: "adaptive_streaming_I1"
    ipendpoint_id: "adaptive_streaming_I1_apache2"
    influxdb_url: "http://192.168.50.10:8086"
    database_name: "CLMCMetrics"
  - name: nginx
    cpus: 1
    memory: 2048
    disk: "10GB"
    service_name: "nginx"
    forward_ports:
      - guest: 80
        host: 8083
    ip_address: "192.168.50.13"
    location: "DC1"
    sfc_id: "MS_Template_1"
    sfc_id_instance: "MS_I1"
    sf_id: "adaptive_streaming"
    sf_id_instance: "adaptive_streaming_nginx_I1"
    ipendpoint_id: "adaptive_streaming_nginx_I1_apache1"
    influxdb_url: "http://192.168.50.10:8086"
    database_name: "CLMCMetrics"
  - name: mongo
    cpus: 1
    memory: 2048
    disk: "10GB"
    service_name: "mongo"
    forward_ports:
      - guest: 80
        host: 8084
    ip_address: "192.168.50.14"
    location: "DC1"
    sfc_id: "MS_Template_1"
    sfc_id_instance: "MS_I1"
    sf_id: "metadata_database"
    sf_id_instance: "metadata_database_I1"
    ipendpoint_id: "metadata_database_I1_apache1"
    influxdb_url: "http://192.168.50.10:8086"
    database_name: "CLMCMetrics"
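For reference, the Vagrantfile above simply iterates this hosts list. The short sketch below is an illustration only (not part of this commit); it assumes PyYAML is installed and the default "streaming" infrastructure directory implied by the Vagrantfile, and shows one way to sanity-check an rspec.yml before running vagrant up:

# Hypothetical helper, not part of this commit: sanity-check an rspec.yml
# before "vagrant up". Assumes PyYAML and the default "streaming" infra directory.
import yaml

with open("infra/streaming/rspec.yml") as f:
    spec = yaml.safe_load(f)

for host in spec["hosts"]:
    ports = ", ".join("{0}->{1}".format(p["guest"], p["host"])
                      for p in host.get("forward_ports", []))
    print("{0}: ip {1}, {2} cpu(s), {3} MB RAM, ports [{4}]".format(
        host["name"], host["ip_address"], host["cpus"], host["memory"], ports))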
File moved
# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
# Global tags can be specified here in key="value" format.
[global_tags]
# location of the data centre
location="{{LOCATION}}"
# media service template id
sfc="{{SFC_ID}}"
# media service instance
sfc_i="{{SFC_ID_INSTANCE}}"
# service function type
sf="{{SF_ID}}"
# service function instance id
sf_i="{{SF_ID_INSTANCE}}"
# ipendpoint id aka surrogate instance
ipendpoint="{{IP_ENDPOINT_ID}}"
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will cache metric_buffer_limit metrics for each output, and will
## flush this buffer on a successful write.
metric_buffer_limit = 1000
## Flush the buffer whenever full, regardless of flush_interval.
flush_buffer_when_full = true
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. You shouldn't set this below
## interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Logging configuration:
## Run telegraf in debug mode
debug = false
## Run telegraf in quiet mode
quiet = false
## Specify the log file name. The empty string means to log to stdout.
logfile = "G:/Telegraf/telegraf.log"
## Override default hostname, if empty use os.Hostname()
hostname = ""
###############################################################################
# OUTPUTS #
###############################################################################
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
# Multiple urls can be specified but it is assumed that they are part of the same
# cluster, this means that only ONE of the urls will be written to each interval.
# urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
urls = ["{{INFLUXDB_URL}}"] # required
# The target database for metrics (telegraf will create it if not exists)
database = "{{DATABASE_NAME}}" # required
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
# note: using second precision greatly helps InfluxDB compression
precision = "s"
## Write timeout (for the InfluxDB client), formatted as a string.
## If not provided, will default to 5s. 0s means no timeout (not recommended).
timeout = "5s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
# Set the user agent for HTTP POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512
###############################################################################
# INPUTS #
###############################################################################
# # Influx HTTP write listener
[[inputs.http_listener]]
## Address and port to host HTTP listener on
service_address = ":8186"
## timeouts
read_timeout = "10s"
write_timeout = "10s"
## HTTPS
#tls_cert= "/etc/telegraf/cert.pem"
#tls_key = "/etc/telegraf/key.pem"
## MTLS
#tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
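The {{...}} tokens in this template are placeholders that the CLMC agent install script fills in from the arguments passed by the Vagrantfile. As a hedged illustration only (the file names and values below are assumptions, not the script's actual implementation), a straightforward substitution pass could look like this:

# Illustrative sketch: fill the {{...}} placeholders in a Telegraf template.
# The template filename and the sample values are assumptions for this example.
values = {
    'LOCATION': 'DC1',
    'SFC_ID': 'MS_Template_1',
    'SFC_ID_INSTANCE': 'MS_I1',
    'SF_ID': 'adaptive_streaming',
    'SF_ID_INSTANCE': 'adaptive_streaming_I1',
    'IP_ENDPOINT_ID': 'adaptive_streaming_I1_apache1',
    'INFLUXDB_URL': 'http://192.168.50.10:8086',
    'DATABASE_NAME': 'CLMCMetrics',
}

with open('telegraf_ipendpoint_template.conf') as template:
    conf = template.read()

# replace each {{KEY}} token with its configured value
for key, value in values.items():
    conf = conf.replace('{{' + key + '}}', value)

with open('telegraf.conf', 'w') as output:
    output.write(conf)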
File moved
File moved
# coding: utf-8
## ///////////////////////////////////////////////////////////////////////
##
## © University of Southampton IT Innovation Centre, 2018
##
## Copyright in this software belongs to University of Southampton
## IT Innovation Centre of Gamma House, Enterprise Road,
## Chilworth Science Park, Southampton, SO16 7NS, UK.
##
## This software may not be used, sold, licensed, transferred, copied
## or reproduced in whole or in part in any manner or form or in or
## on any media by any person other than in accordance with the terms
## of the Licence Agreement supplied with the software, or otherwise
## without the prior written consent of the copyright owners.
##
## This software is distributed WITHOUT ANY WARRANTY, without even the
## implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE, except where stated in the Licence Agreement supplied with
## the software.
##
## Created By : Simon Crowle
## Created Date : 03-01-2018
## Created for Project : FLAME
##
##///////////////////////////////////////////////////////////////////////
from random import random, randint
import math
import time
import datetime
import uuid
import urllib.parse
import urllib.request
import urllib.error
import LineProtocolGenerator as lp
# DemoConfig is a configuration class used to set up the simulation
class DemoConfig(object):
    def __init__(self):
        self.LOG_DATA = False  # Log data sent to INFLUX if true
        self.ITERATION_STRIDE = 10  # Number of seconds of requests/responses sent to INFLUXDB per HTTP POST
        self.SEG_LENGTH = 4  # Each MPEG segment encodes 5 seconds worth of frames (assume double-buffering)
        self.MAX_SEG = (30 * 60) / (self.SEG_LENGTH + 1)  # 30 mins
        self.MIN_QUALITY = 5  # Minimum quality requested by a client
        self.MAX_QUALITY = 9  # Maximum quality requested by a client
        self.MIN_SERV_RESP_TIME = 100  # Minimum time taken for server to respond to a request (ms)
        self.CLIENT_START_DELAY_MAX = 360  # Randomly delay clients starting stream by up to 360 seconds
dc = DemoConfig()
# DemoClient is a class that simulates the behaviour of a single client requesting video from the server
class DemoClient(object):
    def __init__(self):
        self.startRequestOffset = randint(0, dc.CLIENT_START_DELAY_MAX)  # Random time offset before requesting 1st segment
        self.numSegRequests = dc.MAX_SEG - randint(0, 50)  # Randomly stop client watching all of video
        self.id = uuid.uuid4()  # Client's ID
        self.currSeg = 1  # Client's current segment
        self.nextSegCountDown = 0  # Count-down before asking for next segment
        self.qualityReq = randint(dc.MIN_QUALITY, dc.MAX_QUALITY)  # Randomly assigned quality for this client
        self.lastReqID = None  # ID used to track last request made by this client

    def getQuality(self):
        return self.qualityReq

    def getLastRequestID(self):
        return self.lastReqID

    def iterateRequest(self):
        result = None

        # If the time offset before asking for 1st segment is through and there are more segments to get
        # and it is time to get one, then create a request for one!
        if (self.startRequestOffset == 0):
            if (self.numSegRequests > 0):
                if (self.nextSegCountDown == 0):
                    # Generate a request ID
                    self.lastReqID = uuid.uuid4()

                    # Start building the InfluxDB statement
                    # tags first
                    result = 'cid="' + str(self.id) + '",'
                    result += 'segment=' + str(self.currSeg) + ' '

                    # then fields
                    result += 'quality=' + str(self.qualityReq) + ','
                    result += 'index="' + str(self.lastReqID) + '"'

                    # Update this client's segment tracking
                    self.currSeg += 1
                    self.numSegRequests -= 1
                    self.nextSegCountDown = dc.SEG_LENGTH
                else:
                    self.nextSegCountDown -= 1
        else:
            self.startRequestOffset -= 1

        # Return the _partial_ InfluxDB statement (server will complete the rest)
        return result
# Used to tell influx to launch or teardown a database (DB name overwritten by telegraf)
class DatabaseManager:
    def __init__(self, influx_url, db_name):
        self.influx_url = influx_url
        self.influx_db = db_name

    def database_up(self):
        self._createDB()

    def database_teardown(self):
        self._deleteDB()

    def _createDB(self):
        self._sendInfluxQuery('CREATE DATABASE ' + self.influx_db)

    def _deleteDB(self):
        self._sendInfluxQuery('DROP DATABASE ' + self.influx_db)

    def _sendInfluxQuery(self, query):
        query = urllib.parse.urlencode({'q': query})
        query = query.encode('ascii')
        req = urllib.request.Request(self.influx_url + '/query', query)
        urllib.request.urlopen(req)
# Used to allocate clients to servers
class ClientManager:
    def __init__(self, servers):
        self.servers = servers

    def generate_new_clients(self, amount):
        assigned_count = 0
        while (assigned_count < amount):
            for server in self.servers:
                if (assigned_count < amount):
                    server.assign_client(DemoClient())
                    assigned_count += 1
# Simulates nodes not connected directly to clients (e.g. telegraf)
class Node:
    def __init__(self, influxurl, influxdb, input_cpu):
        self.influx_url = influxurl
        self.influx_db = influxdb
        self.report_cpu = input_cpu

    def iterateService(self):
        if self.report_cpu:
            self._sendInfluxData(lp.generate_CPU_report(0))
            self._sendInfluxData(lp.generate_mem_report(10, 0))

    # Private Methods
    # ________________________________________________________________
    # This is duplicated from DemoServer, should probably be refactored
    def _sendInfluxData(self, data):
        data = data.encode()
        header = {'Content-Type': 'application/octet-stream'}
        req = urllib.request.Request(self.influx_url + '/write?db=' + self.influx_db, data, header)
        urllib.request.urlopen(req)
# Container for common SF tags, used as part of generating SF usage reports
# DemoServer is the class that simulates the behaviour of the MPEG-DASH server
class DemoServer(object):
    def __init__(self, si, db_url, db_name, server_id, server_location):
        self.influxDB = db_name  # InfluxDB database name
        self.id = uuid.uuid4()  # MPEG-DASH server ID
        self.simIterations = si  # Number of iterations to make for this simulation
        self.influxURL = db_url  # InfluxDB connection URL
        self.currentTime = int(round(time.time() * 1000))  # The current time
        self._configure(server_id, server_location)
        self.clients = []

    def shutdown(self):
        print("Shutting down")
        self.configure_VM('stopping')

    def assign_client(self, new_client):
        self.clients.append(new_client)
        print('Number of clients: ' + str(len(self.clients)))

    def configure_server(self, server_id, server_location):
        print("Configuring Servers")
        server_conf_block = []
        server_conf_block.append(lp._generateServerConfig(server_id, server_location, 8, '100G', '1T',
                                                          self._selectDelay(0)))
        #ids = ['A', 'B', 'C']
        #locations = ['locA', 'locB', 'locC']
        #for i, id in enumerate(ids):
        #    server_conf_block.append(
        #        lp._generateServerConfig(id, locations[i], 8, '100G', '1T', self._selectDelay(len(ids))))
        self._sendInfluxDataBlock(server_conf_block)

    def configure_VM(self, state):
        print("Configuring VM node")
        self._sendInfluxData(self._generateVM(state, 1))

    def configure_ports(self):
        print("Configuring ports")
        server_conf_block = []
        for i in range(0, 10):
            server_conf_block.append(lp._configure_port())
        self._sendInfluxDataBlock(server_conf_block)
    def shutdown_VM(self):
        print("Shutting down VM nodes")
        VM_conf_block = []
        # generate a 'stopping' report for each of the 10 VM nodes
        for i in range(0, 10):
            VM_conf_block.append(self._generateVM('stopping', 1))
        self._sendInfluxDataBlock(VM_conf_block)
    def iterateService(self):
        # The simulation will run through 'X' iterations of the simulation
        # each time this method is called. This allows request/response messages to be
        # batched and sent to the InfluxDB in sensible sized blocks
        return self._executeServiceIteration(dc.ITERATION_STRIDE)

    def _executeServiceIteration(self, count):
        requestBlock = []
        responseBlock = []
        networkBlock = []
        SFBlock = []
        totalDifference = sumOfclientQuality = percentageDifference = 0

        # Keep going until this stride (count) completes
        while (count > 0):
            count -= 1

            # Check we have some iterations to do
            if (self.simIterations > 0):
                # First record clients that request segments
                clientsRequesting = []

                # Run through all clients and see if they make a request
                for client in self.clients:
                    # Record request, if it was generated
                    cReq = client.iterateRequest()
                    if cReq is not None:
                        clientsRequesting.append(client)
                        requestBlock.append(lp._generateClientRequest(cReq, self.id, self.currentTime))

                # Now generate request statistics
                clientReqCount = len(clientsRequesting)

                # Create a single CPU usage metric for this iteration
                cpuUsagePercentage = self._cpuUsage(clientReqCount)

                # Now generate responses, based on stats
                for client in clientsRequesting:
                    # Generate some quality and delays based on the number of clients requesting for this iteration
                    qualitySelect = self._selectQuality(client.getQuality(), clientReqCount)
                    delaySelect = self._selectDelay(clientReqCount) + self.currentTime
                    qualityDifference = client.getQuality() - qualitySelect
                    totalDifference += qualityDifference
                    # print('totalDifference = ' + str(totalDifference) +'\n')
                    sumOfclientQuality += client.getQuality()
                    # print('sumOfclientQuality = ' + str(sumOfclientQuality) + '\n')
                    percentageDifference = int((totalDifference * 100) / sumOfclientQuality)
                    # print('percentageOfQualityDifference = ' + str(percentageDifference) + '%')
                    responseBlock.append(lp._generateServerResponse(client.getLastRequestID(), qualitySelect,
                                                                    delaySelect, cpuUsagePercentage,
                                                                    percentageDifference))
                    SFBlock.append(lp._generateMpegDashReport('https://netflix.com/scream', qualitySelect, delaySelect))
                    networkBlock.append(lp._generateNetworkReport(sumOfclientQuality, delaySelect))

                # Iterate the service simulation
                self.simIterations -= 1
                self.currentTime += 1000  # advance 1 second

        # If we have some requests/responses to send to InfluxDB, do it
        if (len(requestBlock) > 0 and len(responseBlock) > 0):
            self._sendInfluxDataBlock(requestBlock)
            self._sendInfluxDataBlock(responseBlock)
            self._sendInfluxDataBlock(networkBlock)
            self._sendInfluxDataBlock(SFBlock)
            print("Sending influx data blocks")

        return self.simIterations

    def _generateVM(self, state, delay):
        return lp._generateVMConfig(state, 1, '100G', '1T', self._selectDelay(delay))
    # 'Private' methods ________________________________________________________
    def _configure(self, server_id, server_location):
        print("Configuring")
        self.configure_VM('starting')
        self.configure_VM('running')
        #time.sleep(0.1)
        self.configure_server(server_id, server_location)
        self._sendInfluxData(lp._configure_port('01', 'running', '1GB/s', self.currentTime))
        self._sendInfluxData(lp._configure_service_function('starting', 100))
        #time.sleep(0.1)
        self._sendInfluxData(lp._configure_service_function('running', 100))

    def _cpuUsage(self, clientCount):
        cpuUsage = randint(0, 10)
        if (clientCount < 20):
            cpuUsage += 5
        elif (clientCount >= 20 and clientCount < 40):
            cpuUsage += 10
        elif (clientCount >= 40 and clientCount < 60):
            cpuUsage += 15
        elif (clientCount >= 60 and clientCount < 80):
            cpuUsage += 20
        elif (clientCount >= 80 and clientCount < 110):
            cpuUsage += 30
        elif (clientCount >= 110 and clientCount < 150):
            cpuUsage += 40
        elif (clientCount >= 150 and clientCount < 200):
            cpuUsage += 55
        elif (clientCount >= 200 and clientCount < 300):
            cpuUsage += 70
        elif (clientCount >= 300):
            cpuUsage += 90
        return cpuUsage

    # Rule to determine a response quality, based on the current number of clients requesting
    def _selectQuality(self, expectedQuality, clientCount):
        result = dc.MAX_QUALITY
        if (clientCount < 50):
            result = 8
        elif (clientCount >= 50 and clientCount < 100):
            result = 7
        elif (clientCount >= 100 and clientCount < 150):
            result = 6
        elif (clientCount >= 150 and clientCount < 200):
            result = 5
        elif (clientCount >= 200 and clientCount < 250):
            result = 4
        elif (clientCount >= 250 and clientCount < 300):
            result = 3
        elif (clientCount >= 300):
            result = 2

        # Give the client what it wants if possible
        if (result > expectedQuality):
            result = expectedQuality

        return result

    # Rule to determine a delay, based on the current number of clients requesting
    def _selectDelay(self, cCount):
        result = dc.MIN_SERV_RESP_TIME
        if (cCount < 50):
            result = 150
        elif (cCount >= 50 and cCount < 100):
            result = 200
        elif (cCount > 100 and cCount < 200):
            result = 500
        elif (cCount >= 200):
            result = 1000

        # Perturb the delay a bit
        result += randint(0, 20)

        return result

    # InfluxDB data send methods
    # -----------------------------------------------------------------------------------------------
    def _sendInfluxData(self, data):
        data = data.encode()
        header = {'Content-Type': 'application/octet-stream'}
        req = urllib.request.Request(self.influxURL + '/write?db=' + self.influxDB, data, header)
        urllib.request.urlopen(req)

    def _sendInfluxDataBlock(self, dataBlock):
        msg = ''
        for stmt in dataBlock:
            msg += stmt + '\n'
        try:
            if (dc.LOG_DATA == True):
                print(msg)
            self._sendInfluxData(msg)
        except urllib.error.HTTPError as ex:
            print("Error calling: " + str(ex.url) + "..." + str(ex.msg))
# Entry point
# -----------------------------------------------------------------------------------------------
print("Preparing simulation")
# Iterations is time in seconds for each server to simulate
iterations = 3000
# port 8086: Direct to DB specified
# port 8186: To telegraf, telegraf specifies DB
start_time = time.localtime()
database_manager = DatabaseManager('http://localhost:8186', 'testDB')
# Set up InfluxDB (need to wait a little while)
database_manager.database_teardown()
time.sleep(2)
database_manager.database_up()
time.sleep(2)
# configure servers
demoServer_southampton = DemoServer(iterations, 'http://localhost:8186', 'testDB', "Server1", "Southampton")
demoServer_bristol = DemoServer(iterations, 'http://localhost:8186', 'testDB', "Server2", "Bristol")
telegraf_node = Node('http://localhost:8186', 'testDB', True)
server_list = [demoServer_southampton, demoServer_bristol]
client_manager = ClientManager(server_list)
client_manager.generate_new_clients(20)
# Start simulation
print("Starting simulation")
while True:
    for server in server_list:
        itCount = server.iterateService()
    telegraf_node.iterateService()

    pcDone = round((itCount / iterations) * 100)
    print("Simulation remaining (%): " + str(pcDone) + " \r", end='')

    if itCount == 0:
        break

for server in server_list:
    server.shutdown()
print("\nFinished")
end_time = time.localtime()
print("Started at {0} ended at {1}, total run time {2}".format(start_time,end_time,(end_time-start_time)))
@@ -90,15 +90,6 @@
# user_agent = "telegraf"
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512
[[outputs.file]]
## Files to write to, "stdout" is a specially handled file.
files = ["stdout", "/tmp/metrics.out"]
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
###############################################################################
#!/bin/bash
#/////////////////////////////////////////////////////////////////////////
#//
#// (c) University of Southampton IT Innovation Centre, 2017
#//
#// Copyright in this software belongs to University of Southampton
#// IT Innovation Centre of Gamma House, Enterprise Road,
#// Chilworth Science Park, Southampton, SO16 7NS, UK.
#//
#// This software may not be used, sold, licensed, transferred, copied
#// or reproduced in whole or in part in any manner or form or in or
#// on any media by any person other than in accordance with the terms
#// of the Licence Agreement supplied with the software, or otherwise
#// without the prior written consent of the copyright owners.
#//
#// This software is distributed WITHOUT ANY WARRANTY, without even the
#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
#// PURPOSE, except where stated in the Licence Agreement supplied with
#// the software.
#//
#// Created By : Michael Boniface
#// Created Date : 23/01/2018
#// Created for Project : FLAME
#//
#/////////////////////////////////////////////////////////////////////////
# Install ipendpoint
# This is a dummy script as the endpoint is driven by simulation
\ No newline at end of file
# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
# Global tags can be specified here in key="value" format.
[global_tags]
# location of the data centre
location="{{LOCATION}}"
# media service template id
sfc="{{SFC_ID}}"
# media service instance
sfc_i="{{SFC_ID_INSTANCE}}"
# service function type
sf="{{SF_ID}}"
# service function instance id
sf_i="{{SF_ID_INSTANCE}}"
# ipendpoint id aka surrogate instance
ipendpoint="{{IP_ENDPOINT_ID}}"
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will cache metric_buffer_limit metrics for each output, and will
## flush this buffer on a successful write.
metric_buffer_limit = 1000
## Flush the buffer whenever full, regardless of flush_interval.
flush_buffer_when_full = true
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. You shouldn't set this below
## interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Logging configuration:
## Run telegraf in debug mode
debug = false
## Run telegraf in quiet mode
quiet = false
## Specify the log file name. The empty string means to log to stdout.
logfile = "G:/Telegraf/telegraf.log"
## Override default hostname, if empty use os.Hostname()
hostname = ""
###############################################################################
# OUTPUTS #
###############################################################################
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
# Multiple urls can be specified but it is assumed that they are part of the same
# cluster, this means that only ONE of the urls will be written to each interval.
# urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
urls = ["{{INFLUXDB_URL}}"] # required
# The target database for metrics (telegraf will create it if not exists)
database = "{{DATABASE_NAME}}" # required
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
# note: using second precision greatly helps InfluxDB compression
precision = "s"
## Write timeout (for the InfluxDB client), formatted as a string.
## If not provided, will default to 5s. 0s means no timeout (not recommended).
timeout = "5s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
# Set the user agent for HTTP POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512
###############################################################################
# INPUTS #
###############################################################################
# # Influx HTTP write listener
[[inputs.http_listener]]
## Address and port to host HTTP listener on
service_address = ":8186"
## timeouts
read_timeout = "10s"
write_timeout = "10s"
## HTTPS
#tls_cert= "/etc/telegraf/cert.pem"
#tls_key = "/etc/telegraf/key.pem"
## MTLS
#tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
\ No newline at end of file
#!/bin/bash
#/////////////////////////////////////////////////////////////////////////
#//
#// (c) University of Southampton IT Innovation Centre, 2017
#//
#// Copyright in this software belongs to University of Southampton
#// IT Innovation Centre of Gamma House, Enterprise Road,
#// Chilworth Science Park, Southampton, SO16 7NS, UK.
#//
#// This software may not be used, sold, licensed, transferred, copied
#// or reproduced in whole or in part in any manner or form or in or
#// on any media by any person other than in accordance with the terms
#// of the Licence Agreement supplied with the software, or otherwise
#// without the prior written consent of the copyright owners.
#//
#// This software is distributed WITHOUT ANY WARRANTY, without even the
#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
#// PURPOSE, except where stated in the Licence Agreement supplied with
#// the software.
#//
#// Created By : Michael Boniface
#// Created Date : 23/01/2018
#// Created for Project : FLAME
#//
#/////////////////////////////////////////////////////////////////////////
# Install mongodb
sudo apt-get update
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 2930ADAE8CAF5059EE73BB4B58712A2291FA4AD5
echo "deb [ arch=amd64,arm64 ] https://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.6 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.6.list
sudo apt-get update
sudo apt-get install -y mongodb-org
# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
# Global tags can be specified here in key="value" format.
[global_tags]
# location of the data centre
location="{{LOCATION}}"
# media service template id
sfc="{{SFC_ID}}"
# media service instance
sfc_i="{{SFC_ID_INSTANCE}}"
# service function type
sf="{{SF_ID}}"
# service function instance id
sf_i="{{SF_ID_INSTANCE}}"
# ipendpoint id aka surrogate instance
ipendpoint="{{IP_ENDPOINT_ID}}"
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will cache metric_buffer_limit metrics for each output, and will
## flush this buffer on a successful write.
metric_buffer_limit = 1000
## Flush the buffer whenever full, regardless of flush_interval.
flush_buffer_when_full = true
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. You shouldn't set this below
## interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Logging configuration:
## Run telegraf in debug mode
debug = false
## Run telegraf in quiet mode
quiet = false
## Specify the log file name. The empty string means to log to stdout.
logfile = "G:/Telegraf/telegraf.log"
## Override default hostname, if empty use os.Hostname()
hostname = ""
###############################################################################
# OUTPUTS #
###############################################################################
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
# Multiple urls can be specified but it is assumed that they are part of the same
# cluster, this means that only ONE of the urls will be written to each interval.
# urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
urls = ["{{INFLUXDB_URL}}"] # required
# The target database for metrics (telegraf will create it if not exists)
database = "{{DATABASE_NAME}}" # required
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
# note: using second precision greatly helps InfluxDB compression
precision = "s"
## Write timeout (for the InfluxDB client), formatted as a string.
## If not provided, will default to 5s. 0s means no timeout (not recommended).
timeout = "5s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
# Set the user agent for HTTP POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512
###############################################################################
# INPUTS #
###############################################################################
# # Influx HTTP write listener
[[inputs.http_listener]]
## Address and port to host HTTP listener on
service_address = ":8186"
## timeouts
read_timeout = "10s"
write_timeout = "10s"
## HTTPS
#tls_cert= "/etc/telegraf/cert.pem"
#tls_key = "/etc/telegraf/key.pem"
## MTLS
#tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
[[inputs.mongodb]]
## An array of URLs of the form:
## "mongodb://" [user ":" pass "@"] host [ ":" port]
## For example:
## mongodb://user:auth_key@10.10.3.30:27017,
## mongodb://10.10.3.33:18832,
servers = ["mongodb://127.0.0.1:27017"]
gather_perdb_stats = false
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
\ No newline at end of file
#!/bin/bash
#/////////////////////////////////////////////////////////////////////////
#//
#// (c) University of Southampton IT Innovation Centre, 2017
#//
#// Copyright in this software belongs to University of Southampton
#// IT Innovation Centre of Gamma House, Enterprise Road,
#// Chilworth Science Park, Southampton, SO16 7NS, UK.
#//
#// This software may not be used, sold, licensed, transferred, copied
#// or reproduced in whole or in part in any manner or form or in or
#// on any media by any person other than in accordance with the terms
#// of the Licence Agreement supplied with the software, or otherwise
#// without the prior written consent of the copyright owners.
#//
#// This software is distributed WITHOUT ANY WARRANTY, without even the
#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
#// PURPOSE, except where stated in the Licence Agreement supplied with
#// the software.
#//
#// Created By : Michael Boniface
#// Created Date : 01/02/2018
#// Created for Project : FLAME
#//
#/////////////////////////////////////////////////////////////////////////
# Install nginx
sudo apt-get update
yes Y | sudo apt-get install nginx
# Need to set up basic stats as this is not configured by default
# http://nginx.org/en/docs/http/ngx_http_stub_status_module.html
nginx -s reload
# start NGINX
systemctl start nginx
\ No newline at end of file
# Telegraf configuration
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
# Global tags can be specified here in key="value" format.
[global_tags]
# location of the data centre
location="{{LOCATION}}"
# media service template id
sfc="{{SFC_ID}}"
# media service instance
sfc_i="{{SFC_ID_INSTANCE}}"
# service function type
sf="{{SF_ID}}"
# service function instance id
sf_i="{{SF_ID_INSTANCE}}"
# ipendpoint id aka surrogate instance
ipendpoint="{{IP_ENDPOINT_ID}}"
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will cache metric_buffer_limit metrics for each output, and will
## flush this buffer on a successful write.
metric_buffer_limit = 1000
## Flush the buffer whenever full, regardless of flush_interval.
flush_buffer_when_full = true
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. You shouldn't set this below
## interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Logging configuration:
## Run telegraf in debug mode
debug = false
## Run telegraf in quiet mode
quiet = false
## Specify the log file name. The empty string means to log to stdout.
logfile = "G:/Telegraf/telegraf.log"
## Override default hostname, if empty use os.Hostname()
hostname = ""
###############################################################################
# OUTPUTS #
###############################################################################
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
# Multiple urls can be specified but it is assumed that they are part of the same
# cluster, this means that only ONE of the urls will be written to each interval.
# urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
urls = ["{{INFLUXDB_URL}}"] # required
# The target database for metrics (telegraf will create it if not exists)
database = "{{DATABASE_NAME}}" # required
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
# note: using second precision greatly helps InfluxDB compression
precision = "s"
## Write timeout (for the InfluxDB client), formatted as a string.
## If not provided, will default to 5s. 0s means no timeout (not recommended).
timeout = "5s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
# Set the user agent for HTTP POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
# Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512
###############################################################################
# INPUTS #
###############################################################################
# # Influx HTTP write listener
[[inputs.http_listener]]
## Address and port to host HTTP listener on
service_address = ":8186"
## timeouts
read_timeout = "10s"
write_timeout = "10s"
## HTTPS
#tls_cert= "/etc/telegraf/cert.pem"
#tls_key = "/etc/telegraf/key.pem"
## MTLS
#tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
# Read Nginx's basic status information (ngx_http_stub_status_module)
[[inputs.nginx]]
## An array of Nginx stub_status URI to gather stats.
urls = ["http://localhost/server_status"]
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## HTTP response timeout (default: 5s)
response_timeout = "5s"
\ No newline at end of file