Commit b928d592 authored by MJB

Updated simulator based on request rates and responses. More realistic, as the previous simulation logic did not make much sense.
parent a934061f
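
The new model is queue-based: each simulation tick the request arrival rate grows linearly, new requests join the endpoint's queue, and the endpoint serves as many as its CPU-time budget allows, carrying the rest over. A minimal sketch of that model under the diff's constants (TICK_TIME = 1, an increment of 5 requests/s, and a DC2-style endpoint with 2 CPUs; names are simplified, not the committed code):

import random

TICK_TIME = 1       # seconds per simulation tick
RATE_INCREMENT = 5  # extra requests/second added each tick
CPUS = 2            # DC2-style endpoint

queue = 0
arrival_rate = 0
for tick in range(10):
    arrival_rate += RATE_INCREMENT            # demand ramps linearly
    queue += arrival_rate                     # new requests join the queue
    # per-request service time in mS, clamped at a 10 mS floor as in the diff
    service_ms = max(10, int(random.normalvariate(50, 50 * 0.95)))
    capacity_ms = CPUS * TICK_TIME * 1000     # CPU time available this tick
    processed = min(queue, capacity_ms / service_ms)
    cpu_usage = processed * service_ms / capacity_ms
    queue -= int(processed)
    print(tick, arrival_rate, int(processed), round(cpu_usage, 2), queue)

Once the ramp exceeds what the CPU budget can serve, the queue (and with it the reported peak response time) starts to grow, which is the behaviour the old quality-table logic could not produce.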
@@ -18,7 +18,7 @@ def generate_network_report(recieved_bytes, sent_bytes, time):
     result += ' ' + str(_getNSTime(time))
 
     # Measurement
-    # print('network'+result)
+    print(result)
     return result
@@ -42,13 +42,14 @@ def generate_vm_config(state, cpu, mem, storage, time):
 
 # Reports cpu usage, scaling on requests
-def generate_cpu_report(cpu_useage, cpu_usage_system, time):
+def generate_cpu_report(cpu_usage, cpu_active_time, cpu_idle_time, time):
     result = 'vm_host_cpu_usage'
     # Tag
     result += ' '
     # field
-    result += 'cpu_usage='+str(cpu_useage)
-    result += ',cpu_usage_system='+str(cpu_usage_system)
+    result += 'cpu_usage='+str(cpu_usage)
+    result += ',cpu_active_time='+str(cpu_active_time)
+    result += ',cpu_idle_time='+str(cpu_idle_time)
     result += ' '
     # Time
     result += str(_getNSTime(time))
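
For reference, generate_cpu_report builds a single InfluxDB line protocol point: the measurement name, no tags, the three fields, then a nanosecond timestamp. An illustrative output line (the field values here are made up):

vm_host_cpu_usage cpu_usage=0.75,cpu_active_time=1500,cpu_idle_time=500 1515583926000000000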
@@ -70,7 +71,7 @@ def generate_mpegdash_report(resource, requests, avg_response_time, peak_response_time, time):
     result += 'peak_response_time=' + str(peak_response_time)
     # Timestamp
     result += ' ' + str(_getNSTime(time))
-    # print(result)
+    print(result)
     return result
@@ -2,14 +2,20 @@ import LineProtocolGenerator as lp
 import time
 import urllib.parse
 import urllib.request
-from random import random, randint
+import sys
+import random
+
+# Simulation parameters
+TICK_TIME = 1
+DEFAULT_REQUEST_RATE_INCREMENT = 5
+SIMULATION_TIME_SEC = 10
+
+# CLMC parameters
+INFLUX_DB_URL = 'http://192.168.50.10:8086'
 
 # Simulator for services
 class sim:
     def __init__(self, influx_url):
-        # requests per second for different quality levels
-        self.quality_request_rate = {"DC1": [10, 20, 10], "DC2": [5, 30, 5]}
         # We don't need this as the db is CLMC metrics
         self.influx_db = 'CLMCMetrics'
         self.influx_url = influx_url
@@ -21,47 +27,100 @@ class sim:
     def run(self, simulation_length_seconds):
         start_time = time.time()
         current_time = int(time.time())
-        surrogate_services = [{'agent_url': 'http://192.168.50.11:8186', 'location': 'DC1', 'cpu': 2,
-                               'mem': '8GB', 'storage': '1TB'},
-                              {'agent_url': 'http://192.168.50.12:8186', 'location': 'DC2', 'cpu': 4,
-                               'mem': '8GB', 'storage': '1TB'}
+        ip_endpoints = [{'agent_url': 'http://192.168.50.11:8186', 'location': 'DC1', 'cpu': 16,
+                         'mem': '8GB', 'storage': '1TB', 'request_queue': 0, 'request_arrival_rate': 0},
+                        {'agent_url': 'http://192.168.50.12:8186', 'location': 'DC2', 'cpu': 2,
+                         'mem': '8GB', 'storage': '1TB', 'request_queue': 0, 'request_arrival_rate': 0}
                         ]
-        # Simulate surrogate services being asserted
-        for service in surrogate_services:
-            self._sendInfluxData(service['agent_url'], lp.generate_vm_config('starting', service['cpu'], service['mem'], service['storage'], current_time))
-        for service in surrogate_services:
-            self._sendInfluxData(service['agent_url'], lp.generate_vm_config('running', service['cpu'], service['mem'], service['storage'], current_time))
 
-        # Run simulation
+        # Simulate configuration of the ip endpoints
+        # endpoint state -> normal distribution [mu, sigma as a fraction of mu], in mS
+        config_delay_dist = {"placed": [5000, 0.68], "booted": [10000, 0.68], "connected": [1000, 0.68]}
+
+        # Place the endpoints
+        max_delay = 0
+        for ip_endpoint in ip_endpoints:
+            delay_time = int(random.normalvariate(config_delay_dist['placed'][0], config_delay_dist['placed'][0]*config_delay_dist['placed'][1]))
+            self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_vm_config('placed', ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], current_time+delay_time))
+            if delay_time > max_delay:
+                max_delay = delay_time
+        current_time += max_delay
+
+        # Boot the endpoints
+        max_delay = 0
+        for ip_endpoint in ip_endpoints:
+            delay_time = int(random.normalvariate(config_delay_dist['booted'][0], config_delay_dist['booted'][0]*config_delay_dist['booted'][1]))
+            self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_vm_config('booted', ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], current_time+delay_time))
+            if delay_time > max_delay:
+                max_delay = delay_time
+        current_time += max_delay
+
+        # Connect the endpoints
+        max_delay = 0
+        for ip_endpoint in ip_endpoints:
+            delay_time = int(random.normalvariate(config_delay_dist['connected'][0], config_delay_dist['connected'][0]*config_delay_dist['connected'][1]))
+            self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_vm_config('connected', ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], current_time+delay_time))
+            if delay_time > max_delay:
+                max_delay = delay_time
+        current_time += max_delay
+
+        request_arrival_rate_inc = DEFAULT_REQUEST_RATE_INCREMENT
+        request_queue = 0
         for i in range(simulation_length_seconds):
-            for service in surrogate_services:
-                # Scale CPU usage on number of requests, quality and cpu allocation
-                cpu_usage = self.quality_request_rate[service['location']][0]
-                cpu_usage += self.quality_request_rate[service['location']][1]*2
-                cpu_usage += self.quality_request_rate[service['location']][2]*4
-                cpu_usage = cpu_usage/service['cpu']
-                cpu_usage = cpu_usage/100 # Transform into %
-                self._sendInfluxData(service['agent_url'], lp.generate_cpu_report(cpu_usage, cpu_usage, current_time))
-
-                # Scale SENT/REC bytes on requests and quality
-                bytes = self.quality_request_rate[service['location']][0]
-                bytes += self.quality_request_rate[service['location']][1]*2
-                bytes += self.quality_request_rate[service['location']][2]*4
-                bytes_sent = 1024*bytes
-                bytes_rec = 32*bytes
-                self._sendInfluxData(service['agent_url'], lp.generate_network_report(bytes_rec, bytes_sent, current_time))
-
-                # Scale MPEG Dash on requests, quality, cpu usage
-                avg_response_time = randint(0, 5 * self.quality_request_rate[service['location']][0])
-                avg_response_time += randint(0, 10 * self.quality_request_rate[service['location']][1])
-                avg_response_time += randint(0, 15 * self.quality_request_rate[service['location']][2])
-                avg_response_time *= cpu_usage
-                peak_response_time = avg_response_time + randint(30, 60)
-                requests = sum(self.quality_request_rate[service['location']])
-                self._sendInfluxData(service['agent_url'], lp.generate_mpegdash_report('https://Netflix.com/scream', requests, avg_response_time, peak_response_time, current_time))
-            # Add a second to the clock
-            current_time += 1000
+            for ip_endpoint in ip_endpoints:
+                request_processing_time = 0
+                cpu_time_available = 0
+                requests_processed = 0
+                max_requests_processed = 0
+                cpu_active_time = 0
+                cpu_idle_time = 0
+                cpu_usage = 0
+                cpu_load_time = 0
+                avg_response_time = 0
+                peak_response_time = 0
+
+                # linear inc to arrival rate
+                ip_endpoint['request_arrival_rate'] += request_arrival_rate_inc
+                # add new requests to the queue
+                ip_endpoint['request_queue'] += ip_endpoint['request_arrival_rate']
+
+                # time to process one request (mS) in the current second
+                request_processing_time = int(random.normalvariate(50, 50*0.95))
+                if request_processing_time <= 10:
+                    request_processing_time = 10
+                # amount of cpu time (mS) per tick
+                cpu_time_available = ip_endpoint['cpu']*TICK_TIME*1000
+                max_requests_processed = cpu_time_available/request_processing_time
+                # calc how many requests processed
+                if ip_endpoint['request_queue'] <= max_requests_processed:
+                    # processed all of the requests
+                    requests_processed = ip_endpoint['request_queue']
+                else:
+                    # processed the maximum number of requests
+                    requests_processed = max_requests_processed
+
+                # calculate cpu usage
+                cpu_active_time = int(requests_processed*request_processing_time)
+                cpu_idle_time = int(cpu_time_available-cpu_active_time)
+                cpu_usage = cpu_active_time/cpu_time_available
+                self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_cpu_report(cpu_usage, cpu_active_time, cpu_idle_time, current_time))
+
+                # calc network usage metrics with no constraints
+                bytes_sent = 1024*requests_processed
+                bytes_rec = 32*requests_processed
+                self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_network_report(bytes_rec, bytes_sent, current_time))
+
+                # time to process all of the requests in the queue
+                peak_response_time = ip_endpoint['request_queue']*request_processing_time/ip_endpoint['cpu']
+                # mid-range
+                avg_response_time = (peak_response_time+request_processing_time)/2
+                self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_mpegdash_report('https://Netflix.com/scream', ip_endpoint['request_arrival_rate'], avg_response_time, peak_response_time, current_time))
+
+                # remove requests processed off the queue
+                ip_endpoint['request_queue'] -= int(requests_processed)
+
+            current_time += TICK_TIME*1000
         end_time = time.time()
         print("Simulation Finished. Start time {0}. End time {1}. Total time {2}".format(start_time, end_time, end_time-start_time))
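Worked through with round numbers for the DC2 endpoint: 2 CPUs give 2*1*1000 = 2000 mS of CPU time per tick, so at the 50 mS mean processing time at most 2000/50 = 40 requests can be served per tick. With the arrival rate growing by 5 requests/s per tick, demand reaches that 40 req/s ceiling around tick 8; from then on a backlog accumulates and peak_response_time (queue length * processing time / CPUs) climbs, while the 16-CPU DC1 endpoint (16000 mS, roughly 320 requests per tick) stays under-loaded for the whole 10-second run.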
@@ -86,6 +145,6 @@ class sim:
         urllib.request.urlopen(req)
 
-simulator = sim('http://192.168.50.10:8086')
-simulator.run(180)
+simulator = sim(INFLUX_DB_URL)
+simulator.run(SIMULATION_TIME_SEC)
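
The hunk above only shows the tail of _sendInfluxData. A minimal sketch of what such a helper could look like, assuming it POSTs the line protocol string to the agent's InfluxDB 1.x /write endpoint (the request construction here is an assumption, not the committed code):

import urllib.request

def _sendInfluxData(self, url, data):
    # POST one line protocol string to the write API; db name comes from __init__
    data = data.encode()
    req = urllib.request.Request(url + '/write?db=' + self.influx_db, data)
    urllib.request.urlopen(req)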