Commit e983a001 authored by Nikolay Stanchev

[ Issue #56 ] - Adjusted test to use pytest parameters decorator

parent 3758c78f
@@ -87,38 +87,10 @@ The **clmc-service** vm includes influx, Kapacitor and Chronograf. The following
#### Running the streaming-sim test
**needs to be updated once we have this in pytest format**
SSH into the CLMC server:
`vagrant --fixture=streaming-sim -- ssh clmc-service`
In the queryTests JSON file, define the queries to be tested, as well as the expected result for each query. The format
is straightforward:
```json
{
  "[queryReference]": {
    "query": "[queryToBeExecuted]",
    "expected_result": "[queryExpectedResult]"
  }
}
```
where "[queryReference]" , "[queryToBeExecuted]" and "[queryExpectedResult]" are replaced with the values for testing.
Another thing to note is that instead of escaping double quotes in the testing values, one can use single quotes and the python
script would automatically adjust these when reading the json file. E.g.
Instead of this:
```json
{
  "query": "SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"cpu_usage\""
}
```
use this:
```json
{
  "query": "SELECT count(*) FROM 'CLMCMetrics'.'autogen'.'cpu_usage'"
}
```
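This substitution is a plain string replacement done before the JSON is parsed. A minimal sketch of the idea, with a hypothetical helper name (the commit's own version lives in the `queries_to_test` fixture removed further down):
```python
from json import loads

def read_query_tests(path):
    """Read the query-tests JSON file, turning single quotes in the values
    into escaped double quotes before parsing."""
    with open(path) as f:
        # ' becomes \" , so the single-quoted example above parses as standard JSON
        return loads(f.read().replace("\n", "").replace("'", "\\\""))
```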
The next step is to generate the test data, which can be done in two ways.
The first option is to run a Python script to generate the test data sets
@@ -136,7 +108,7 @@ using the first option, only the tests would be executed.
The command for running the testing module is
`pytest -s /vagrant/test/streaming-sim/VerifySimResults.py`
`pytest -s /vagrant/test/streaming-sim/test_simresults.py`
The `-s` option in the command makes pytest display the print output from the test code and is therefore optional.
@@ -250,7 +250,7 @@ class Sim(object):
return urllib.request.urlopen(req).read().decode("utf-8").strip()
@pytest.fixture
@pytest.fixture(scope='module')
def run_simulation_fixture():
"""
A fixture which checks if the DB has been created; if not, it runs the simulator with a 10-second timeout after that
@@ -274,7 +274,7 @@ def run_simulation_fixture():
def run_simulation(generate=True):
"""
A method which runs the data generation simulator
:param generate: True for generating data, False for deleting the DB
:param generate: True for generating data, False for deleting the DB (optional argument; if not given, the default value True is used)
"""
global INFLUX_DB_URL
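In other words, the two ways of calling the simulator from Python described by the docstring are, roughly:
```python
run_simulation()                # default: generate the simulation data
run_simulation(generate=False)  # delete the InfluxDB database instead
```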
@@ -291,12 +291,14 @@
if __name__ == "__main__":
"""
The main entry for this module. Code here is executed only if the StreamingSim.py file is executed,
but not when it's imported
but not when it's imported in another module
"""
option = True
# check if there are any command line arguments given when executing the module
if len(sys.argv) > 1:
# if CLI argument '-c' is set when executing the script the influx db will be deleted instead of generating data
# if CLI argument '-c' is set when executing the script, the influx db will be deleted instead of generating data
option = str(sys.argv[1]) != "-c"
run_simulation(generate=option)
run_simulation(generate=option)
else:
# no argument is given to the function call, hence the default value True is used
run_simulation()
{
"cpu_usage": {
"query": "SELECT count(*) FROM 'CLMCMetrics'.'autogen'.'cpu_usage'",
"expected_result": "{'results':[{'statement_id':0,'series':[{'name':'cpu_usage','columns':['time','count_cpu_active_time','count_cpu_idle_time','count_cpu_usage'],'values':[['1970-01-01T00:00:00Z',7200,7200,7200]]}]}]}"
},
"ipendpoint_route": {
"query": "SELECT count(*) FROM 'CLMCMetrics'.'autogen'.'ipendpoint_route'",
"expected_result": "{'results':[{'statement_id':0,'series':[{'name':'ipendpoint_route','columns':['time','count_http_requests_fqdn_m','count_network_fqdn_latency'],'values':[['1970-01-01T00:00:00Z',7200,7200]]}]}]}"
},
"mpegdash_service": {
"query": "SELECT count(*) FROM 'CLMCMetrics'.'autogen'.'mpegdash_service'",
"expected_result": "{'results':[{'statement_id':0,'series':[{'name':'mpegdash_service','columns':['time','count_avg_response_time','count_peak_response_time','count_requests'],'values':[['1970-01-01T00:00:00Z',7200,7200,7200]]}]}]}"
},
"net_port_io": {
"query": "SELECT count(*) FROM 'CLMCMetrics'.'autogen'.'net_port_io'",
"expected_result": "{'results':[{'statement_id':0,'series':[{'name':'net_port_io','columns':['time','count_RX_BYTES_PORT_M','count_TX_BYTES_PORT_M'],'values':[['1970-01-01T00:00:00Z',7200,7200]]}]}]}"
},
"vm_res_alloc": {
"query": "SELECT count(*) FROM 'CLMCMetrics'.'autogen'.'vm_res_alloc'",
"expected_result": "{'results':[{'statement_id':0,'series':[{'name':'vm_res_alloc','columns':['time','count_cpu','count_memory','count_storage'],'values':[['1970-01-01T00:00:00Z',12,12,12]]}]}]}"
}
}
\ No newline at end of file
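The `expected_result` strings above are the raw JSON bodies returned by InfluxDB's HTTP `/query` endpoint for each `count(*)` query. A hedged sketch of how such a query can be issued and compared outside the test class (the commit's own helper is the `send_query` method below; the endpoint and parameter name assume the standard InfluxDB 1.x HTTP API, and `influx_query` is an illustrative name):
```python
from urllib.parse import urlencode
from urllib.request import urlopen

def influx_query(base_url, query):
    """Issue a query against InfluxDB's /query endpoint and return the raw JSON text."""
    url = "{0}/query?{1}".format(base_url, urlencode({"q": query}))
    return urlopen(url).read().decode("utf-8").strip()

# e.g. one of the measurements checked above
actual = influx_query("http://localhost:8086", 'SELECT count(*) FROM "CLMCMetrics"."autogen"."cpu_usage"')
```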
@@ -2,54 +2,39 @@
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from json import loads
from os.path import join, dirname
from pytest import fixture
import pytest
from StreamingSim import run_simulation_fixture
class TestSimulation(object):
"""
A 'testing' class used to group all the tests related to the simulation data
A testing class used to group all the tests related to the simulation data
"""
def test_simulation(self, run_simulation_fixture, queries_to_test):
@pytest.mark.parametrize("query, expected_result", [
("SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"cpu_usage\"", "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu_usage\",\"columns\":[\"time\",\"count_cpu_active_time\",\"count_cpu_idle_time\",\"count_cpu_usage\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200,7200]]}]}]}"),
("SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"ipendpoint_route\"", "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"ipendpoint_route\",\"columns\":[\"time\",\"count_http_requests_fqdn_m\",\"count_network_fqdn_latency\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200]]}]}]}"),
("SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"mpegdash_service\"", "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"mpegdash_service\",\"columns\":[\"time\",\"count_avg_response_time\",\"count_peak_response_time\",\"count_requests\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200,7200]]}]}]}"),
("SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"net_port_io\"", "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"net_port_io\",\"columns\":[\"time\",\"count_RX_BYTES_PORT_M\",\"count_TX_BYTES_PORT_M\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200]]}]}]}"),
("SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"vm_res_alloc\"", "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"vm_res_alloc\",\"columns\":[\"time\",\"count_cpu\",\"count_memory\",\"count_storage\"],\"values\":[[\"1970-01-01T00:00:00Z\",12,12,12]]}]}]}")
])
def test_simulation(self, query, expected_result, run_simulation_fixture):
"""
This is the entry point of the test. This method will be found and executed when the module is run using pytest
:param query: the query to execute (value obtained from the pytest parameter decorator)
:param expected_result: the result expected from executing the query (value obtained from the pytest parameter decorator)
:param run_simulation_fixture: the imported fixture to use to generate the testing data - the return value of the fixture is not needed in this case
:param queries_to_test: the fixture to use - returns a JSON object represented as a python dictionary
"""
print("\n") # prints a blank line for formatting purposes
# go through all query references in the dictionary and execute a test for each of them
for query_key in queries_to_test:
query_to_execute = queries_to_test[query_key]["query"]
query_actual_result = self.send_query("http://localhost:8086", query_to_execute)
query_expected_result = queries_to_test[query_key]["expected_result"]
self.check_result(query_key, query_actual_result, query_expected_result, queries_to_test.keys())
print("Successfully passed test for '{0}' query".format(query_key))
@staticmethod
def check_result(query_key, query_actual_result, query_expected_result, queries_under_test):
"""
A test for a single query result given the query reference and the actual result from executing it
:param query_key: the executed query reference, which is under test
:param query_actual_result: the result from the executed query
:param query_expected_result: the expected result for the executed query
:param queries_under_test: the set of query keys, which are being tested
"""
# pytest automatically goes through all queries under test, declared in the parameters decorator
actual_result = self.send_query("http://localhost:8086", query)
assert query_key is not None, "The query key argument must be an existing object."
assert query_actual_result is not None, "The query's actual result argument must be an existing object."
assert expected_result == actual_result, "Simulation test failure"
assert query_key in queries_under_test, "The query reference {0} is not found in the queries under test.".format(query_key)
assert query_expected_result == query_actual_result, "Simulation test failure"
print("Successfully passed test for the following query: {0}".format(query))
@staticmethod
def send_query(url, query):
@@ -66,15 +51,3 @@ class TestSimulation(object):
result = urlopen(request)
return result.read().decode("utf-8").strip()
@staticmethod
@fixture(scope='class')
def queries_to_test():
"""
A pytest fixture used to read the queries, which would be tested, from a JSON file
:return: a JSON object represented as a python dictionary
"""
to_read = join(dirname(__file__), "queryTests.json")
with open(to_read) as f:
return loads(f.read().replace('\n', '').replace("'", "\\\""))
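For readers unfamiliar with the decorator the commit message refers to, here is a minimal standalone illustration of `@pytest.mark.parametrize` (names are purely illustrative): pytest expands the decorated function into one test case per tuple, which is how the five queries above become five separate test results.
```python
import pytest

@pytest.mark.parametrize("value, expected", [
    (1 + 1, 2),
    (2 * 3, 6),
])
def test_arithmetic(value, expected):
    # executed once per (value, expected) tuple in the list above
    assert value == expected
```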