Select Git revision
-
Reuben Pengelly authored
test_simresults.py 8.97 KiB
#!/usr/bin/python3
import pytest
import time
import random
class TestSimulation(object):
    """
    A testing class used to group all the tests related to the simulation data.
    """

    @pytest.fixture(scope='class', autouse=True)
    def run_simulator(self, simulator):
        """
        Class-scoped, auto-used fixture: runs the simulation once before any test in this
        class executes, then waits for the INFLUX database to finish ingesting the data.

        :param simulator: the simulator fixture - imported from conftest.py
        """
        random.seed(0)  # Seed random function so we can reliably test for average queries
        print("Running simulation, please wait...")
        simulator.run(3600)  # simulate one hour (3600 seconds) of data

        print("Waiting for INFLUX to finish receiving simulation data...")
        time.sleep(10)  # wait for data to finish arriving at the INFLUX database
        print("... simulation data fixture finished")

    @pytest.mark.parametrize("query, expected_result", [
        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."cpu_usage"',
         {"time": "1970-01-01T00:00:00Z", "count_cpu_active_time": 7200, "count_cpu_idle_time": 7200, "count_cpu_usage": 7200}),
        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."ipendpoint_route"',
         {"time": "1970-01-01T00:00:00Z", "count_http_requests_fqdn_m": 7200, "count_network_fqdn_latency": 7200}),
        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."mpegdash_service"',
         {"time": "1970-01-01T00:00:00Z", "count_avg_response_time": 7200, "count_peak_response_time": 7200, "count_requests": 7200}),
        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."net_port_io"',
         {"time": "1970-01-01T00:00:00Z", "count_RX_BYTES_PORT_M": 7200, "count_TX_BYTES_PORT_M": 7200}),
        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE ipendpoint=\'adaptive_streaming_I1_apache1\'',
         {"time": "1970-01-01T00:00:00Z", "count_unplaced": 3607, "count_avg_unplaced": 3607, "count_placing": 3607, "count_avg_placing": 3607, "count_placed": 3607, "count_avg_placed": 3607, "count_booting": 3607, "count_avg_booting": 3607, "count_booted": 3607,
          "count_avg_booted": 3607, "count_connecting": 3607, "count_avg_connecting": 3607, "count_connected": 3607, "count_avg_connected": 3607, "count_cpus": 3607, "count_memory": 3607, "count_storage": 3607}),
        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE ipendpoint=\'adaptive_streaming_I1_apache2\'',
         {"time": "1970-01-01T00:00:00Z", "count_unplaced": 3607, "count_avg_unplaced": 3607, "count_placing": 3607, "count_avg_placing": 3607, "count_placed": 3607, "count_avg_placed": 3607, "count_booting": 3607, "count_avg_booting": 3607, "count_booted": 3607,
          "count_avg_booted": 3607, "count_connecting": 3607, "count_avg_connecting": 3607, "count_connected": 3607, "count_avg_connected": 3607, "count_cpus": 3607, "count_memory": 3607, "count_storage": 3607}),
        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."mpegdash_service_config" WHERE ipendpoint=\'adaptive_streaming_I1_apache1\'',
         {"time": "1970-01-01T00:00:00Z", "count_avg_running": 3609, "count_avg_starting": 3609, "count_avg_stopped": 3609, "count_avg_stopping": 3609, "count_running": 3609, "count_starting": 3609, "count_stopped": 3609, "count_stopping": 3609}),
        ('SELECT count(*) FROM "CLMCMetrics"."autogen"."mpegdash_service_config" WHERE ipendpoint=\'adaptive_streaming_I1_apache2\'',
         {"time": "1970-01-01T00:00:00Z", "count_avg_running": 3609, "count_avg_starting": 3609, "count_avg_stopped": 3609, "count_avg_stopping": 3609, "count_running": 3609, "count_starting": 3609, "count_stopped": 3609, "count_stopping": 3609}),
        ('SELECT mean(avg_stopped) as "avg_stopped" FROM "CLMCMetrics"."autogen"."mpegdash_service_config" WHERE avg_stopped <>0',
         {"time": "1970-01-01T00:00:00Z", "avg_stopped": 0.15}),
        ('SELECT mean(avg_starting) as "avg_starting" FROM "CLMCMetrics"."autogen"."mpegdash_service_config" WHERE avg_starting <>0',
         {"time": "1970-01-01T00:00:00Z", "avg_starting": 0.9166666666666666}),
        ('SELECT mean(avg_running) as "avg_running" FROM "CLMCMetrics"."autogen"."mpegdash_service_config" WHERE avg_running <>0',
         {"time": "1970-01-01T00:00:00Z", "avg_running": 0.9997502081598669}),
        ('SELECT mean(avg_stopping) as "avg_stopping" FROM "CLMCMetrics"."autogen"."mpegdash_service_config" WHERE avg_stopping <>0',
         {"time": "1970-01-01T00:00:00Z", "avg_stopping": 0.55})
    ])
    def test_simulation(self, influx_db, query, expected_result):
        """
        This is the entry point of the test. This method will be found and executed when the module is run using pytest.

        pytest automatically goes through all queries under test, declared in the parameters decorator.

        :param influx_db: the influx db client fixture - imported from conftest.py
        :param query: the query to execute (value obtained from the pytest parameter decorator)
        :param expected_result: the result expected from executing the query (value obtained from the pytest parameter decorator)
        """
        query_result = self._query_with_error_check(influx_db, query)

        # get the dictionary of result points; the next() function just gets the first element of the query results generator (we only expect one item in the generator)
        actual_result = next(query_result.get_points())
        assert expected_result == actual_result, "Simulation test failure"

        print("Successfully passed test for the following query: {0}".format(query))

    @pytest.mark.parametrize("query, field", [
        ('SELECT mean("placing") as "mean_transition_placing" FROM "CLMCMetrics"."autogen"."endpoint_config" where "placing" <> 0 and "placed" <> 0 and "ipendpoint"=\'adaptive_streaming_I1_apache1\'',
         'mean_transition_placing'),
        ('SELECT mean("placing") as "mean_target_placing" FROM "CLMCMetrics"."autogen"."endpoint_config" where "placing" <> 0 and "unplaced" <> 0 and "ipendpoint"=\'adaptive_streaming_I1_apache1\'',
         'mean_target_placing'),
        ('SELECT mean("booting") as "mean_transition_booting" FROM "CLMCMetrics"."autogen"."endpoint_config" where "booting" <> 0 and "booted" <> 0 and "ipendpoint"=\'adaptive_streaming_I1_apache1\'',
         'mean_transition_booting'),
        ('SELECT mean("booting") as "mean_target_booting" FROM "CLMCMetrics"."autogen"."endpoint_config" where "booting" <> 0 and "placed" <> 0 and "ipendpoint"=\'adaptive_streaming_I1_apache2\'',
         'mean_target_booting'),
        ('SELECT mean("connecting") as "mean_transition_connecting" FROM "CLMCMetrics"."autogen"."endpoint_config" where "connecting" <> 0 and "connected" <> 0 and "ipendpoint"=\'adaptive_streaming_I1_apache2\'',
         'mean_transition_connecting'),
        ('SELECT mean("connecting") as "mean_target_connecting" FROM "CLMCMetrics"."autogen"."endpoint_config" where "connecting" <> 0 and "booted" <> 0 and "ipendpoint"=\'adaptive_streaming_I1_apache2\'',
         'mean_target_connecting'),
    ])
    def test_mean_config_queries(self, influx_db, query, field):
        """
        Test queries for mean values in the configuration states model.

        - 'mean_transition_{state}' - we want to know the mean time spent on the given state in cases where this was the actual transition state.
          e.g. 'mean_transition_placing' - refers to the mean time spent on state 'placing' in transitions such as 'placing' -> 'placed'
        - 'mean_target_{state}' - we want to know the mean time spent on the given state in cases where this was the actual target state
          e.g. 'mean_target_placing' - refers to the mean time spent on state 'placing' in transitions such as 'unplaced' -> 'placing'

        :param influx_db: the influx db client fixture - imported from conftest.py
        :param query: query under test (value obtained from the pytest parameter decorator)
        :param field: the field id to fetch (value obtained from the pytest parameter decorator)
        """
        query_result = self._query_with_error_check(influx_db, query)

        # get the dictionary of result points; the next() function just gets the first element of the query results generator (we only expect one item in the generator)
        result = next(query_result.get_points()).get(field)
        assert float(result) >= 0.0, "Test failure. Reported mean values cannot be negative."

        print("Successfully passed test for the following query: {0}".format(query))

    @staticmethod
    def _query_with_error_check(influx_db, query):
        """
        Execute a query on the influx db client and assert that no error was returned.

        Shared helper for the test methods above, which previously duplicated this logic.

        :param influx_db: the influx db client fixture - imported from conftest.py
        :param query: the InfluxQL query string to execute
        :return: the result set returned by the client for the given query
        """
        print("\n")  # prints a blank line for formatting purposes

        # the raise_errors=False argument is given so that we could actually test that the DB didn't return any errors instead of raising an exception
        query_result = influx_db.query(query, raise_errors=False)

        # test the error attribute of the result is None, that is no error is returned from executing the DB query
        assert query_result.error is None, "An error was encountered while executing query {0}.".format(query)

        return query_result