Skip to content
Snippets Groups Projects
Commit b928d592 authored by MJB's avatar MJB
Browse files

updated simulator based on request rates and responses. More realistic as the...

updated simulator based on request rates and responses. More realistic as the previous logic of the simulation did not make much sense
parent a934061f
No related branches found
No related tags found
No related merge requests found
...@@ -18,7 +18,7 @@ def generate_network_report(recieved_bytes, sent_bytes, time): ...@@ -18,7 +18,7 @@ def generate_network_report(recieved_bytes, sent_bytes, time):
result += ' ' + str(_getNSTime(time)) result += ' ' + str(_getNSTime(time))
# Measurement # Measurement
# print('network'+result) print(result)
return result return result
...@@ -42,13 +42,14 @@ def generate_vm_config(state, cpu, mem, storage, time): ...@@ -42,13 +42,14 @@ def generate_vm_config(state, cpu, mem, storage, time):
# Reports cpu usage, scaling on requests # Reports cpu usage, scaling on requests
def generate_cpu_report(cpu_useage, cpu_usage_system, time): def generate_cpu_report(cpu_usage, cpu_active_time, cpu_idle_time, time):
result = 'vm_host_cpu_usage' result = 'vm_host_cpu_usage'
# Tag # Tag
result += ' ' result += ' '
# field # field
result += 'cpu_usage='+str(cpu_useage) result += 'cpu_usage='+str(cpu_usage)
result += ',cpu_usage_system='+str(cpu_usage_system) result += ',cpu_active_time='+str(cpu_active_time)
result += ',cpu_idle_time='+str(cpu_idle_time)
result += ' ' result += ' '
# Time # Time
result += str(_getNSTime(time)) result += str(_getNSTime(time))
...@@ -70,7 +71,7 @@ def generate_mpegdash_report(resource, requests, avg_response_time, peak_respons ...@@ -70,7 +71,7 @@ def generate_mpegdash_report(resource, requests, avg_response_time, peak_respons
result += 'peak_response_time=' + str(peak_response_time) result += 'peak_response_time=' + str(peak_response_time)
# Timestamp # Timestamp
result += ' ' + str(_getNSTime(time)) result += ' ' + str(_getNSTime(time))
# print(result) print(result)
return result return result
......
...@@ -2,14 +2,20 @@ import LineProtocolGenerator as lp ...@@ -2,14 +2,20 @@ import LineProtocolGenerator as lp
import time import time
import urllib.parse import urllib.parse
import urllib.request import urllib.request
from random import random, randint import sys
import random
# Simulation parameters
TICK_TIME = 1
DEFAULT_REQUEST_RATE_INCREMENT = 5
SIMULATION_TIME_SEC = 10
# CLMC parameters
INFLUX_DB_URL = 'http://192.168.50.10:8086'
# Simulator for services # Simulator for services
class sim: class sim:
def __init__(self, influx_url): def __init__(self, influx_url):
# requests per second for different quality levels
self.quality_request_rate = {"DC1": [10, 20, 10], "DC2": [5, 30, 5]}
# We don't need this as the db is CLMC metrics # We don't need this as the db is CLMC metrics
self.influx_db = 'CLMCMetrics' self.influx_db = 'CLMCMetrics'
self.influx_url = influx_url self.influx_url = influx_url
...@@ -21,47 +27,100 @@ class sim: ...@@ -21,47 +27,100 @@ class sim:
def run(self, simulation_length_seconds): def run(self, simulation_length_seconds):
start_time = time.time() start_time = time.time()
current_time = int(time.time()) current_time = int(time.time())
surrogate_services = [{'agent_url': 'http://192.168.50.11:8186', 'location': 'DC1', 'cpu': 2,
'mem': '8GB', 'storage': '1TB'}, ip_endpoints = [{'agent_url': 'http://192.168.50.11:8186', 'location': 'DC1', 'cpu': 16,
{'agent_url': 'http://192.168.50.12:8186', 'location': 'DC2', 'cpu': 4, 'mem': '8GB', 'storage': '1TB', 'request_queue': 0, 'request_arrival_rate': 0},
'mem': '8GB', 'storage': '1TB'} {'agent_url': 'http://192.168.50.12:8186', 'location': 'DC2', 'cpu': 2,
] 'mem': '8GB', 'storage': '1TB', 'request_queue': 0, 'request_arrival_rate': 0}
# Simulate surrogate services being asserted ]
for service in surrogate_services:
self._sendInfluxData(service['agent_url'], lp.generate_vm_config('starting', service['cpu'], service['mem'], service['storage'], current_time)) # Simulate configuration of the ip endpoints
for service in surrogate_services: # endpoint state->mu, sigma, mS normal distribution
self._sendInfluxData(service['agent_url'], lp.generate_vm_config('running', service['cpu'], service['mem'], service['storage'], current_time)) config_delay_dist = {"placed": [5000, 0.68], "booted": [10000, 0.68],"connected": [1000, 0.68]}
# Run simulation # Place the endpoints
for i in range(simulation_length_seconds): max_delay = 0
for service in surrogate_services: for ip_endpoint in ip_endpoints:
delay_time = int(random.normalvariate(config_delay_dist['placed'][0], config_delay_dist['placed'][0]*config_delay_dist['placed'][1]))
# Scale CPU usage on number of requests, quality and cpu allocation self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_vm_config('placed', ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], current_time+delay_time))
cpu_usage = self.quality_request_rate[service['location']][0] if delay_time > max_delay:
cpu_usage += self.quality_request_rate[service['location']][1]*2 max_delay = delay_time
cpu_usage += self.quality_request_rate[service['location']][2]*4
cpu_usage = cpu_usage/service['cpu'] current_time +=max_delay
cpu_usage = cpu_usage/100 # Transform into % max_delay = 0
self._sendInfluxData(service['agent_url'], lp.generate_cpu_report( cpu_usage, cpu_usage, current_time)) # Boot the endpoints
for ip_endpoint in ip_endpoints:
# Scale SENT/REC bytes on requests and quality delay_time = int(random.normalvariate(config_delay_dist['booted'][0], config_delay_dist['booted'][0]*config_delay_dist['booted'][1]))
bytes = self.quality_request_rate[service['location']][0] self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_vm_config('booted', ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], current_time+delay_time))
bytes += self.quality_request_rate[service['location']][1]*2 if delay_time > max_delay:
bytes += self.quality_request_rate[service['location']][2]*4 max_delay = delay_time
bytes_sent = 1024*bytes
bytes_rec = 32*bytes current_time +=max_delay
self._sendInfluxData(service['agent_url'], lp.generate_network_report(bytes_rec, bytes_sent, current_time)) max_delay = 0
# Connect the endpoints
# Scale MPEG Dash on requests, quality, cpu usage for ip_endpoint in ip_endpoints:
avg_response_time = randint(0, 5 * self.quality_request_rate[service['location']][0]) delay_time = int(random.normalvariate(config_delay_dist['connected'][0], config_delay_dist['connected'][0]*config_delay_dist['connected'][1]))
avg_response_time += randint(0, 10 * self.quality_request_rate[service['location']][1]) self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_vm_config('connected', ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], current_time+delay_time))
avg_response_time += randint(0, 15 * self.quality_request_rate[service['location']][2]) if delay_time > max_delay:
avg_response_time *= cpu_usage max_delay = delay_time
peak_response_time = avg_response_time + randint(30, 60)
requests = sum(self.quality_request_rate[service['location']]) current_time +=max_delay
self._sendInfluxData(service['agent_url'], lp.generate_mpegdash_report('https://Netflix.com/scream', requests, avg_response_time, peak_response_time, current_time))
# Add a second to the clock request_arrival_rate_inc = DEFAULT_REQUEST_RATE_INCREMENT
current_time += 1000 request_queue = 0
for i in range(simulation_length_seconds):
for ip_endpoint in ip_endpoints:
request_processing_time = 0
cpu_time_available = 0
requests_processed = 0
max_requests_processed = 0
cpu_active_time = 0
cpu_idle_time = 0
cpu_usage = 0
cpu_load_time = 0
avg_response_time = 0
peak_response_time = 0
# linear inc to arrival rate
ip_endpoint['request_arrival_rate'] += request_arrival_rate_inc
# add new requests to the queue
ip_endpoint['request_queue'] += ip_endpoint['request_arrival_rate']
# time to process one request (mS) in the current second
request_processing_time = int(random.normalvariate(50, 50*0.95))
if request_processing_time <= 10:
request_processing_time = 10
# amount of cpu time (mS) per tick
cpu_time_available = ip_endpoint['cpu']*TICK_TIME*1000
max_requests_processed = cpu_time_available/request_processing_time
# calc how many requests processed
if ip_endpoint['request_queue'] <= max_requests_processed:
# processed all of the requests
requests_processed = ip_endpoint['request_queue']
else:
# processed the maximum number of requests
requests_processed = max_requests_processed
# calculate cpu usage
cpu_active_time = int(requests_processed*request_processing_time)
cpu_idle_time = int(cpu_time_available-cpu_active_time)
cpu_usage = cpu_active_time/cpu_time_available
self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_cpu_report(cpu_usage, cpu_active_time, cpu_idle_time, current_time))
# calc network usage metrics with no constraints.
bytes_sent = 1024*requests_processed
bytes_rec = 32*requests_processed
self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_network_report(bytes_rec, bytes_sent, current_time))
# time to process all of the requests in the queue
peak_response_time = ip_endpoint['request_queue']*request_processing_time/ip_endpoint['cpu']
# mid-range
avg_response_time = (peak_response_time+request_processing_time)/2
self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_mpegdash_report('https://Netflix.com/scream', ip_endpoint['request_arrival_rate'], avg_response_time, peak_response_time, current_time))
# remove requests processed off the queue
ip_endpoint['request_queue'] -= int(requests_processed)
current_time += TICK_TIME*1000
end_time = time.time() end_time = time.time()
print("Simulation Finished. Start time {0}. End time {1}. Total time {2}".format(start_time,end_time,end_time-start_time)) print("Simulation Finished. Start time {0}. End time {1}. Total time {2}".format(start_time,end_time,end_time-start_time))
...@@ -83,9 +142,9 @@ class sim: ...@@ -83,9 +142,9 @@ class sim:
data = data.encode() data = data.encode()
header = {'Content-Type': 'application/octet-stream'} header = {'Content-Type': 'application/octet-stream'}
req = urllib.request.Request(url + '/write?db=' + self.influx_db, data, header) req = urllib.request.Request(url + '/write?db=' + self.influx_db, data, header)
urllib.request.urlopen(req) urllib.request.urlopen(req)
simulator = sim('http://192.168.50.10:8086') simulator = sim(INFLUX_DB_URL)
simulator.run(180) simulator.run(SIMULATION_TIME_SEC)
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment