Commit beac0662 authored by Simon Crowle

Merge branch 'endpointConfig' into 'mediaComponentConfig'

Endpoint config

See merge request FLAME/flame-clmc!28
parents 3c37fffd de3857ec
......@@ -23,23 +23,6 @@ def generate_network_report(recieved_bytes, sent_bytes, time):
return result
# Formats VM config
def generate_vm_config(state, cpu, mem, storage, time):
result = [{"measurement": "vm_res_alloc",
"tags": {
"vm_state": state
},
"fields": {
"cpu": cpu,
"memory": mem,
"storage": storage
},
"time": _getNSTime(time)
}]
return result
# Reports cpu usage, scaling on requests
def generate_cpu_report(cpu_usage, cpu_active_time, cpu_idle_time, time):
result = [{"measurement": "cpu_usage",
......@@ -87,12 +70,40 @@ def generate_ipendpoint_route(resource, requests, latency, time):
return result
# InfluxDB likes to have time-stamps in nanoseconds
def _getNSTime(time):
# Convert to nano-seconds
timestamp = int(1000000000*time)
def generate_endpoint_config(time, cpu, mem, storage, current_state, current_state_time, **kwargs):
"""
generates a measurement for a VM endpoint's configuration states
:param cpu: the number of CPUs of the VM endpoint
:param mem: the memory capacity of the VM endpoint
:param storage: the storage capacity of the VM endpoint
:param current_state: the current state the endpoint is in (TAG)
:param current_state_time: the part of the sampling period the endpoint was in the current state
:param time: time of measurement
:param kwargs: 'python-style' keyword arguments used to store each state as a key and its respective state period (in seconds) as the value
:return: a list with a single dictionary object representing the data to post to InfluxDB
"""
# lambda function to look up a state in the keyword arguments, defaulting to 0.0 when the state is not reported
validate = lambda key: kwargs.get(key, 0.0)
# generate and validate the state values
fields = {"cpus": cpu, "memory": mem, "storage": storage} # NOTE: Do we need these fields ?
for state in ("unplaced", "placing", "placed", "booting", "booted", "connecting", "connected"):
fields[state] = validate(state)
fields[("avg_{0}".format(state))] = validate("avg_{0}".format(state))
result = [{"measurement": "endpoint_config",
"tags": {
"current_state": current_state,
"current_state_time": current_state_time,
},
"fields": fields,
"time": _getNSTime(time)}]
return result
return timestamp
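Purely for illustration, a sketch of how generate_endpoint_config might be invoked; the argument values and the reported states below are assumptions, not taken from this commit:

```python
# Hypothetical invocation of generate_endpoint_config; all values are
# illustrative. States not passed as keyword arguments default to 0.0.
points = generate_endpoint_config(
    time=1523544754,                     # measurement time in seconds
    cpu=2, mem=8192, storage=100,        # resource allocation fields
    current_state="connected",           # stored as a tag
    current_state_time=120,              # seconds spent in the current state
    connected=1.0, avg_connected=3605.0  # per-state periods for this sample
)
# points[0]["fields"] holds cpus/memory/storage plus a value and an avg_*
# value for each of the seven endpoint lifecycle states.
```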
def generate_mc_service_config( mcMeasurement, stateTimeStats, time ):
......@@ -142,6 +153,13 @@ def validate_state_time_stats( stateTimeStats ):
return stateTimeStats
# InfluxDB likes to have time-stamps in nanoseconds
def _getNSTime(time):
# Convert to nano-seconds
timestamp = int(1000000000*time)
return timestamp
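A quick worked example of the conversion:

```python
# InfluxDB timestamps are integer nanoseconds, so a UNIX time in seconds
# is scaled by 10^9:
assert _getNSTime(1.5) == 1500000000
assert _getNSTime(1523544754) == 1523544754000000000
```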
# DEPRECATED
# ____________________________________________________________________________
......@@ -153,6 +171,23 @@ def quote_wrap(string):
return "\"" + string + "\""
# Formats VM config
def generate_vm_config(state, cpu, mem, storage, time):
result = [{"measurement": "vm_res_alloc",
"tags": {
"vm_state": state
},
"fields": {
"cpu": cpu,
"memory": mem,
"storage": storage
},
"time": _getNSTime(time)
}]
return result
def _generateClientRequest(cReq, id, time):
# Tags first
result = 'sid="' + str(id) + '",' + cReq
......
......@@ -12,16 +12,16 @@ def streaming_sim_config():
"""
Reads the service configuration deployed for the streaming simulation test.
:return: the python object representing the read YAML file
"""
rspec = pkg_resources.resource_filename('clmctest.monitoring', 'rspec.yml')
print("rspec file: {0}".format(rspec))
print("\nrspec file: {0}".format(rspec))
with open(rspec, 'r') as stream:
data_loaded = yaml.load(stream)
return data_loaded
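The contents of rspec.yml are not part of this diff; purely for illustration, the fixtures below only rely on the loaded object exposing a hosts list whose entries carry an ip_address, roughly:

```python
# Hypothetical shape of the object loaded from rspec.yml; only the parts
# the fixtures use (hosts[i]['ip_address']) are shown, and the names and
# addresses here are made up.
data_loaded = {
    "hosts": [
        {"name": "clmc-service", "ip_address": "203.0.113.10"},  # InfluxDB host
        {"name": "apache1", "ip_address": "203.0.113.11"},       # agent 1
        {"name": "apache2", "ip_address": "203.0.113.12"},       # agent 2
    ]
}
```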
@pytest.fixture(params=[{'database': 'CLMCMetrics'}], scope='module')
def influx_db(streaming_sim_config, request):
"""
......@@ -34,6 +34,7 @@ def influx_db(streaming_sim_config, request):
return InfluxDBClient(host=streaming_sim_config['hosts'][0]['ip_address'], port='8086', database=request.param['database'], timeout=10)
@pytest.fixture(scope="module")
def simulator(streaming_sim_config):
......@@ -42,10 +43,7 @@ def simulator(streaming_sim_config):
agent1_url = "http://" + streaming_sim_config['hosts'][1]['ip_address'] + ":8186"
agent2_url = "http://" + streaming_sim_config['hosts'][2]['ip_address'] + ":8186"
simulator = Sim( influx_url, influx_db_name, agent1_url, agent2_url )
dbs = simulator.db_client.get_list_database()
dbs = [db.get("name") for db in dbs]
simulator = Sim(influx_url, influx_db_name, agent1_url, agent2_url)
simulator.reset()
......
......@@ -9,17 +9,16 @@ class TestSimulation(object):
"""
A testing class used to group all the tests related to the simulation data
"""
@pytest.fixture(scope='class')
def run_simulator( self, simulator ):
random.seed( 0 ) # Seed random function so we can reliably test for average queries
@pytest.fixture(scope='class', autouse=True)
def run_simulator(self, simulator):
random.seed(0) # Seed random function so we can reliably test for average queries
print( "Running simulation, please wait..." )
simulator.run( 3600 )
print( "Waiting for INFLUX to finish receiving simulation data..." )
time.sleep( 10 ) # wait for data to finish arriving at the INFLUX database
print("Running simulation, please wait...")
simulator.run(3600)
print("Waiting for INFLUX to finish receiving simulation data...")
time.sleep(10) # wait for data to finish arriving at the INFLUX database
print( "... simulation data fixture finished" )
@pytest.mark.parametrize("query, expected_result", [
......@@ -31,33 +30,52 @@ class TestSimulation(object):
{"time": "1970-01-01T00:00:00Z", "count_avg_response_time": 7200, "count_peak_response_time": 7200, "count_requests": 7200}),
('SELECT count(*) FROM "CLMCMetrics"."autogen"."net_port_io"',
{"time": "1970-01-01T00:00:00Z", "count_RX_BYTES_PORT_M": 7200, "count_TX_BYTES_PORT_M": 7200}),
('SELECT count(*) FROM "CLMCMetrics"."autogen"."vm_res_alloc"',
{"time": "1970-01-01T00:00:00Z", "count_cpu": 12, "count_memory": 12, "count_storage": 12}),
# Media component state tests
('SELECT count(*) FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE ipendpoint=\'adaptive_streaming_I1_apache1\'',
{"time": "1970-01-01T00:00:00Z", "count_unplaced": 3639, "count_avg_unplaced": 3639, "count_placing": 3639, "count_avg_placing": 3639, "count_placed": 3639, "count_avg_placed": 3639, "count_booting": 3639, "count_avg_booting": 3639, "count_booted": 3639,
"count_avg_booted": 3639, "count_connecting": 3639, "count_avg_connecting": 3639, "count_connected": 3639, "count_avg_connected": 3639, "count_cpus": 3639, "count_memory": 3639, "count_storage": 3639}),
('SELECT count(*) FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE ipendpoint=\'adaptive_streaming_I1_apache2\'',
{"time": "1970-01-01T00:00:00Z", "count_unplaced": 3639, "count_avg_unplaced": 3639, "count_placing": 3639, "count_avg_placing": 3639, "count_placed": 3639, "count_avg_placed": 3639, "count_booting": 3639, "count_avg_booting": 3639, "count_booted": 3639,
"count_avg_booted": 3639, "count_connecting": 3639, "count_avg_connecting": 3639, "count_connected": 3639, "count_avg_connected": 3639, "count_cpus": 3639, "count_memory": 3639, "count_storage": 3639}),
('SELECT count(*) FROM "CLMCMetrics"."autogen"."mpegdash_service_config" WHERE ipendpoint=\'adaptive_streaming_I1_apache1\'',
{"time" : "1970-01-01T00:00:00Z", "count_avg_running" : 3609, "count_avg_starting" : 3609, "count_avg_stopped" : 3609, "count_avg_stopping" : 3609, "count_running" : 3609, "count_starting" : 3609, "count_stopped" : 3609, "count_stopping" : 3609}),
{"time": "1970-01-01T00:00:00Z", "count_avg_running": 3609, "count_avg_starting": 3609, "count_avg_stopped": 3609, "count_avg_stopping": 3609, "count_running": 3609, "count_starting": 3609, "count_stopped": 3609, "count_stopping": 3609}),
('SELECT count(*) FROM "CLMCMetrics"."autogen"."mpegdash_service_config" WHERE ipendpoint=\'adaptive_streaming_I1_apache2\'',
{"time" : "1970-01-01T00:00:00Z", "count_avg_running" : 3609, "count_avg_starting" : 3609, "count_avg_stopped" : 3609, "count_avg_stopping" : 3609, "count_running" : 3609, "count_starting" : 3609, "count_stopped" : 3609, "count_stopping" : 3609}),
('SELECT mean(avg_stopped) as "avg_stopped" FROM "CLMCMetrics"."autogen"."mpegdash_service_config" WHERE avg_stopped <>0',
{"time" : "1970-01-01T00:00:00Z", "avg_stopped" : 0.15}),
('SELECT mean(avg_starting) as "avg_starting" FROM "CLMCMetrics"."autogen"."mpegdash_service_config" WHERE avg_starting <>0',
{"time" : "1970-01-01T00:00:00Z", "avg_starting" : 0.9166666666666666}),
('SELECT mean(avg_running) as "avg_running" FROM "CLMCMetrics"."autogen"."mpegdash_service_config" WHERE avg_running <>0',
{"time" : "1970-01-01T00:00:00Z", "avg_running" : 0.9997502081598669}),
('SELECT mean(avg_stopping) as "avg_stopping" FROM "CLMCMetrics"."autogen"."mpegdash_service_config" WHERE avg_stopping <>0',
{"time" : "1970-01-01T00:00:00Z", "avg_stopping" : 0.55})
{"time": "1970-01-01T00:00:00Z", "count_avg_running": 3609, "count_avg_starting": 3609, "count_avg_stopped": 3609, "count_avg_stopping": 3609, "count_running": 3609, "count_starting": 3609, "count_stopped": 3609, "count_stopping": 3609}),
('SELECT mean(avg_placing) as "avg_placing" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE avg_placing <> 0 and ipendpoint=\'adaptive_streaming_I1_apache1\'',
{"time": "1970-01-01T00:00:00Z", "avg_placing": 9.4}),
('SELECT mean(avg_booting) as "avg_booting" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE avg_booting <> 0 and ipendpoint=\'adaptive_streaming_I1_apache1\'',
{"time": "1970-01-01T00:00:00Z", "avg_booting": 9.6}),
('SELECT mean(avg_connecting) as "avg_connecting" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE avg_connecting <> 0 and ipendpoint=\'adaptive_streaming_I1_apache1\'',
{"time": "1970-01-01T00:00:00Z", "avg_connecting": 10.2}),
('SELECT mean(avg_connected) as "avg_connected" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE avg_connected <> 0 and ipendpoint=\'adaptive_streaming_I1_apache1\'',
{"time": "1970-01-01T00:00:00Z", "avg_connected": 3605.0}),
('SELECT mean(avg_placing) as "avg_placing" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE avg_placing <> 0 and ipendpoint=\'adaptive_streaming_I1_apache2\'',
{"time": "1970-01-01T00:00:00Z", "avg_placing": 9.4}),
('SELECT mean(avg_booting) as "avg_booting" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE avg_booting <> 0 and ipendpoint=\'adaptive_streaming_I1_apache2\'',
{"time": "1970-01-01T00:00:00Z", "avg_booting": 9.6}),
('SELECT mean(avg_connecting) as "avg_connecting" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE avg_connecting <> 0 and ipendpoint=\'adaptive_streaming_I1_apache2\'',
{"time": "1970-01-01T00:00:00Z", "avg_connecting": 10.2}),
('SELECT mean(avg_connected) as "avg_connected" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE avg_connected <> 0 and ipendpoint=\'adaptive_streaming_I1_apache2\'',
{"time": "1970-01-01T00:00:00Z", "avg_connected": 3605.0}),
('SELECT mean(avg_stopped) as "avg_stopped" FROM "CLMCMetrics"."autogen"."mpegdash_service_config" WHERE avg_stopped <> 0',
{"time": "1970-01-01T00:00:00Z", "avg_stopped": 0.15}),
('SELECT mean(avg_starting) as "avg_starting" FROM "CLMCMetrics"."autogen"."mpegdash_service_config" WHERE avg_starting <> 0',
{"time": "1970-01-01T00:00:00Z", "avg_starting": 0.9166666666666666}),
('SELECT mean(avg_running) as "avg_running" FROM "CLMCMetrics"."autogen"."mpegdash_service_config" WHERE avg_running <> 0',
{"time": "1970-01-01T00:00:00Z", "avg_running": 0.9997502081598669}),
('SELECT mean(avg_stopping) as "avg_stopping" FROM "CLMCMetrics"."autogen"."mpegdash_service_config" WHERE avg_stopping <> 0',
{"time": "1970-01-01T00:00:00Z", "avg_stopping": 0.55})
])
def test_simulation( self, run_simulator, influx_db, query, expected_result ):
def test_simulation(self, influx_db, query, expected_result):
"""
This is the entry point of the test. This method will be found and executed when the module is ran using pytest
:param query: the query to execute (value obtained from the pytest parameter decorator)
:param expected_result: the result expected from executing the query (value obtained from the pytest parameter decorator)
:param influx_db the import db client fixture - imported from contest.py
:param run_simulation_fixture: the imported fixture to use to generate the testing data - the return value of the fixture is not needed in this case
"""
# pytest automatically goes through all queries under test, declared in the parameters decorator
......@@ -69,7 +87,7 @@ class TestSimulation(object):
# test the error attribute of the result is None, that is no error is returned from executing the DB query
assert query_result.error is None, "An error was encountered while executing query {0}.".format(query)
# get the dictionary of result points; the next() function just gets the first element of the query results iterator (we only expect one item in the iterator)
# get the dictionary of result points; the next() function just gets the first element of the query results generator (we only expect one item in the generator)
actual_result = next(query_result.get_points())
assert expected_result == actual_result, "Simulation test failure"
......
......@@ -2,8 +2,11 @@
import pytest
import yaml
import requests
import time
import pkg_resources
@pytest.fixture(scope="module")
def streaming_config():
"""
......@@ -18,3 +21,16 @@ def streaming_config():
with open(rspec, 'r') as stream:
data_loaded = yaml.load(stream)
return data_loaded
@pytest.fixture(scope="module", autouse=True,
params=[{'config': {'kapacitor_url': 'http://localhost:8888/chronograf/v1/sources/1/kapacitors', 'kapacitor_file': '/vagrant/test/streaming/kapacitor.json'}}])
def kapacitor_config(request):
kapacitor_configuration = request.param['config']['kapacitor_file']
with open(kapacitor_configuration, "r") as rule_file:
data = "".join(line.strip() for line in rule_file.readlines())
kapacitor_url = request.param['config']['kapacitor_url']
requests.post(url=kapacitor_url, data=data, headers={"Content-Type": "application/json"})
time.sleep(1)
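Note that the fixture above posts the Kapacitor configuration without checking the HTTP response; a hedged variant of its last few lines that fails fast on rejection (mirroring the status-code assertion added to kapacitor_setting further down) might be:

```python
# Sketch: same POST as in the fixture above, but asserting that Chronograf
# accepted the Kapacitor configuration before the tests proceed.
r = requests.post(url=kapacitor_url, data=data, headers={"Content-Type": "application/json"})
assert r.status_code in (200, 201), "Couldn't register kapacitor config, status: {0}".format(r.status_code)
time.sleep(1)
```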
......@@ -33,18 +33,8 @@ echo $TEST_DIR"/kapacitor.conf"
cp $TEST_DIR/kapacitor.conf /etc/kapacitor/kapacitor.conf
systemctl start kapacitor
# wait for kapacitor to restart
# TODO: do this better
sleep 5
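One way to address the TODO above is to poll Kapacitor's ping endpoint rather than sleeping for a fixed interval; a sketch in Python using the requests library already used by the tests (the endpoint path and timeout values are assumptions based on Kapacitor's HTTP API):

```python
# Sketch: poll Kapacitor's ping endpoint (GET /kapacitor/v1/ping returns
# 204 when the service is up) instead of sleeping a fixed 5 seconds.
import time
import requests

def wait_for_kapacitor(url="http://localhost:9092/kapacitor/v1/ping", timeout=30):
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            if requests.get(url, timeout=2).status_code == 204:
                return True
        except requests.exceptions.ConnectionError:
            pass  # service not accepting connections yet
        time.sleep(1)
    return False
```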
# Set up Influx data source
curl -i -X POST -H "Content-Type: application/json" http://localhost:8888/chronograf/v1/sources -d @$TEST_DIR/influx.json
# Set up Kapacitor
curl -i -X POST -H "Content-Type: application/json" http://localhost:8888/chronograf/v1/sources/1/kapacitors -d @$TEST_DIR/kapacitor.json
# Set up rules
curl -i -X POST -H "Content-Type: application/json" http://localhost:9092/kapacitor/v1/tasks -d @$TEST_DIR/rules.json
# Set up dashboard
curl -i -X POST -H "Content-Type: application/json" http://localhost:8888/chronograf/v1/dashboards -d @$TEST_DIR/dashboard.json
{
"id" : "TestRule1",
"type" : "batch",
"dbrps" : [{"db": "CLMCMetrics", "rp" : "autogen"}],
"script" : "var ruleData = batch\r\n |query(''' SELECT mean(\"handled\") AS \"mean_handled\" FROM \"CLMCMetrics\".\"autogen\".\"nginx\" WHERE \"ipendpoint\"='adaptive_streaming_I1_nginx1' ''')\r\n .period(5s)\r\n .every(5s)\r\n\r\nvar ruleAlert = ruleData\r\n |alert()\r\n .id('{{ .Name }}\/adaptive_streaming_I1_nginx1')\r\n .message('{{ .ID }} is {{ .Level }} Mean handled connections: {{ index .Fields \"mean_handled\" }}')\r\n .warn(lambda: \"mean_handled\" > 10)\r\n .log( '\/tmp\/TestRule1.log' )",
"status" : "enabled"
}
\ No newline at end of file
{
"id" : "TestRule2",
"type" : "batch",
"dbrps" : [{"db": "CLMCMetrics", "rp" : "autogen"}],
"script" : "var ruleData = batch\r\n |query(''' SELECT mean(\"waiting\") AS \"mean_waiting\" FROM \"CLMCMetrics\".\"autogen\".\"nginx\" WHERE \"ipendpoint\"='adaptive_streaming_I1_nginx1' ''')\r\n .period(5s)\r\n .every(5s)\r\n\r\nvar ruleAlert = ruleData\r\n |alert()\r\n .id('{{ .Name }}\/adaptive_streaming_I1_nginx1')\r\n .message('{{ .ID }} is {{ .Level }} Mean waiting connections: {{ index .Fields \"mean_waiting\" }}')\r\n .warn(lambda: \"mean_waiting\" > 10)\r\n .log( '\/tmp\/TestRule2.log' )",
"status" : "enabled"
}
\ No newline at end of file
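For readability, unescaping the script field of TestRule1 gives the following TICKscript; TestRule2 is identical except that it computes mean("waiting") as mean_waiting and logs to /tmp/TestRule2.log:

```
var ruleData = batch
    |query(''' SELECT mean("handled") AS "mean_handled" FROM "CLMCMetrics"."autogen"."nginx" WHERE "ipendpoint"='adaptive_streaming_I1_nginx1' ''')
        .period(5s)
        .every(5s)

var ruleAlert = ruleData
    |alert()
        .id('{{ .Name }}/adaptive_streaming_I1_nginx1')
        .message('{{ .ID }} is {{ .Level }} Mean handled connections: {{ index .Fields "mean_handled" }}')
        .warn(lambda: "mean_handled" > 10)
        .log('/tmp/TestRule1.log')
```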
......@@ -21,6 +21,8 @@ class TestStreamingAlerts(object):
@pytest.mark.parametrize("rule, log", [
("rules.json", "/tmp/RPSLoad.log"),
("test_rule1.json", "/tmp/TestRule1.log"),
("test_rule2.json", "/tmp/TestRule2.log"),
])
def test_alerts(self, rule, log, streaming_url, streaming_manifest):
"""
......@@ -38,14 +40,10 @@ class TestStreamingAlerts(object):
:param streaming_manifest: the fixture providing the root of the XML streaming manifest
"""
kapacitor_setter = self.kapacitor_setting(rule)
kapacitor_setter = self.kapacitor_setting(rule, log)
next(kapacitor_setter) # Setup the test rule
try:
if isfile(log):
remove(log) # delete log file if existing from previous tests
except PermissionError:
system("sudo rm {0}".format(log)) # handles the case for running on linux where permission will be required to delete the old log file
print("Testing alert creation for rule: {0}".format(rule))
segments = streaming_manifest.findall(".//{urn:mpeg:DASH:schema:MPD:2011}SegmentURL")
......@@ -56,9 +54,11 @@ class TestStreamingAlerts(object):
t.start()
alert_created = False
counter = 0
time_delay = 2.5
while True:
# loop while the threads are executing and check every time_delay seconds whether the alert log has been created or the threads have finished execution
sleep(2.5)
sleep(time_delay)
if isfile(log):
for t in threads: # kill all running threads in case log file is created beforehand
t.stop()
......@@ -67,32 +67,55 @@ class TestStreamingAlerts(object):
if threads_queue.full():
break
assert alert_created, "Alerts test failed: no log file is created indicating a triggered alert."
counter += time_delay # the counter tracks the elapsed time; for the rules under test a 30-second time frame is usually enough to trigger the alert
if counter >= 12*time_delay:
for t in threads: # kill all running threads in case of test failure
t.stop()
break
assert alert_created, "Alerts test failed: no log file is created indicating a triggered alert for rule {0}.".format(rule)
print("\nSuccessfully passed alert creation test.\n")
print("Successfully passed alert creation test for rule: {0}.".format(rule))
next(kapacitor_setter) # Teardown the test rule
def kapacitor_setting(self, rule):
def kapacitor_setting(self, rule, log):
"""
A generator function used to provide setUp/tearDown actions for a particular kapacitor rule.
On setUp rule is initialized, on tearDown rule is deleted. Interleaving is achieved using the generator pattern.
:param rule: the name of the json file for the rule under test
:param log: the absolute path of the log file that's being tested
"""
# check if the log file is already created due to a previous test
try:
if isfile(log):
remove(log) # delete log file if existing from previous tests
except PermissionError:
system("sudo rm {0}".format(log)) # handles the case for running on linux where permission will be required to delete the old log file
# Initialization of the kapacitor rule - Test setUp (UnitTest style)
with open(join(dirname(__file__), rule), "r") as rule_file:
data = "".join(line.strip() for line in rule_file.readlines())
rule_data = json.loads(data)
requests.delete(url=urljoin(self.kapacitor_url + "/", rule_data.get("id"))) # delete in case of a task with the same ID already set in the kapacitor
requests.post(url=self.kapacitor_url, data=data, headers={"Content-Type": "application/json"})
r = requests.post(url=self.kapacitor_url, data=data, headers={"Content-Type": "application/json"})
assert r.status_code == 200, "Couldn't create alert rule {0}".format(rule)
print("\nSuccessfully created test rule {0}".format(rule))
yield
# Deleting the kapacitor rule used for testing - Test tearDown (UnitTest style)
requests.delete(url=urljoin(self.kapacitor_url + "/", rule_data.get("id")))
# check if the log file is created and clean it up
try:
if isfile(log):
remove(log) # delete log file if existing from previous tests
except PermissionError:
system("sudo rm {0}".format(log)) # handles the case for running on linux where permission will be required to delete the old log file
yield
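As the docstring notes, interleaving is achieved with the generator pattern: the test's first next() call executes everything up to the first yield (setup), and the second next() resumes from that point and runs the teardown up to the final yield, which prevents a StopIteration. A minimal illustration with illustrative names:

```python
# Minimal illustration of the two-yield setup/teardown generator used by
# kapacitor_setting (resource names are made up):
def resource_setting():
    print("setup: create the rule")     # executed by the first next()
    yield
    print("teardown: delete the rule")  # executed by the second next()
    yield                               # second yield avoids StopIteration

setter = resource_setting()
next(setter)   # setup before the test body
# ... exercise the system under test ...
next(setter)   # teardown after the test body
```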
@staticmethod
......