diff --git a/README.md b/README.md
index 48e1606100cbd30ef3a1fd4e53c5d5b70f24fe65..4739ab358961acac0889245b2c1e4e4b60d953e2 100644
--- a/README.md
+++ b/README.md
@@ -87,38 +87,10 @@ The **clmc-service** vm includes influx, Kapacitor and Chronograf. The following
 
 #### Running the streaming-sim test
 
-**needs to be updated once we have this in pytest format**
-
 SSH into the CLMC server
 
 `vagrant --fixture=streaming-sim -- ssh clmc-service`
 
-In the queryTests JSON file, define the queries that would be under test as well as the expected result for the given query. The format
-is straightforward:
-```json
-{
-"[queryReference]" : {
-    "query" : "[queryToBeExecuted]",
-    "result" : "[queryExpectedResult]"
-  }
-}
-```
-where "[queryReference]" , "[queryToBeExecuted]" and "[queryExpectedResult]" are replaced with the values for testing.
-Another thing to note is that instead of escaping double quotes in the testing values, one can use single quotes and the python
-script would automatically adjust these when reading the json file. E.g.
-Instead of this:
-```json
-{
-"query": "SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"cpu_usage\""
-}
-```
-use this
-```json
-{
-"query": "SELECT count(*) FROM 'CLMCMetrics'.'autogen'.'cpu_usage'"
-}
-```
-
 The next step is to generate the test data, which could be done in two ways.
 
 The first option is to run a python script to generate the test data sets
@@ -136,7 +108,7 @@ using the first option, only the tests would be executed.
 
 The command for running the testing module is
 
-`pytest -s /vagrant/test/streaming-sim/VerifySimResults.py`
+`pytest -s /vagrant/test/streaming-sim/test_simresults.py`
 
 The `-s` option in the command makes pytest show the print output produced by the test code and is, therefore, optional.
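+
+If the InfluxDB database still holds data from a previous run, it can be cleared with the simulation script's `-c` option before the tests are executed again; the fixture used by the test module will then regenerate the data. A rough example (assuming the simulation script is also located under `/vagrant/test/streaming-sim` on the clmc-service VM):
+
+```
+python3 /vagrant/test/streaming-sim/StreamingSim.py -c
+pytest -s /vagrant/test/streaming-sim/test_simresults.py
+```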
 
diff --git a/test/streaming-sim/StreamingSim.py b/test/streaming-sim/StreamingSim.py
index 3eebf1c3653418b0b638963c9b58d69c84ceb186..b5a9b219b75fa795f46d6fb44b819b2132f4fffd 100644
--- a/test/streaming-sim/StreamingSim.py
+++ b/test/streaming-sim/StreamingSim.py
@@ -250,7 +250,7 @@ class Sim(object):
         return urllib.request.urlopen(req).read().decode("utf-8").strip()
 
 
-@pytest.fixture
+@pytest.fixture(scope='module')
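+# module scope means the fixture body is executed only once per test module (rather than before
+# every test), so the simulation data is generated a single time for all parametrized test cases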
 def run_simulation_fixture():
     """
     A fixture which checks if the DB has been created; if not, it runs the simulator, followed by a 10-second timeout
@@ -274,7 +274,7 @@ def run_simulation_fixture():
 def run_simulation(generate=True):
     """
     A method which runs the data generation simulator
-    :param generate: True for generating data, False for deleting the DB
+    :param generate: True for generating data, False for deleting the DB (optional argument; defaults to True if not given)
     """
 
     global INFLUX_DB_URL
@@ -291,12 +291,14 @@ def run_simulation(generate=True):
 if __name__ == "__main__":
     """
     The main entry for this module. Code here is executed only if the StreamingSim.py file is executed, 
-    but not when it's imported
+    but not when it's imported in another module
     """
 
-    option = True
+    # check if there are any command line arguments given when executing the module
     if len(sys.argv) > 1:
-        # if CLI argument '-c' is set when executing the script the influx db will be deleted instead of generating data
+        # if the CLI argument '-c' is set when executing the script, the InfluxDB database will be deleted instead of generating data
         option = str(sys.argv[1]) != "-c"
-
-    run_simulation(generate=option)
+        run_simulation(generate=option)
+    else:
+        # no command-line argument is given, hence run_simulation() is called with its default value of True
+        run_simulation()
diff --git a/test/streaming-sim/VerifySimResults.py b/test/streaming-sim/VerifySimResults.py
deleted file mode 100644
index 6c94d6e765fa97cb83ff70d52c0f7551d3124d3a..0000000000000000000000000000000000000000
--- a/test/streaming-sim/VerifySimResults.py
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/python3
-
-from urllib.parse import urlencode
-from urllib.request import Request, urlopen
-from json import loads
-from os.path import join, dirname
-from pytest import fixture
-
-from StreamingSim import run_simulation_fixture
-
-
-class TestSimulation(object):
-    """
-    A 'testing' class used to group all the tests related to the simulation data
-    """
-
-    def test_simulation(self, run_simulation_fixture, queries_to_test):
-        """
-        This is the entry point of the test. This method will be found and executed when the module is ran using pytest
-
-        :param run_simulation_fixture: the imported fixture to use to generate the testing data - the return value of the fixture is not needed in this case
-        :param queries_to_test: the fixture to use - returns a JSON object represented as a python dictionary
-        """
-
-        print("\n")  # prints a blank line for formatting purposes
-
-        # go through all query references in the dictionary and execute a test for each of them
-        for query_key in queries_to_test:
-            query_to_execute = queries_to_test[query_key]["query"]
-            query_actual_result = self.send_query("http://localhost:8086", query_to_execute)
-            query_expected_result = queries_to_test[query_key]["expected_result"]
-
-            self.check_result(query_key, query_actual_result, query_expected_result, queries_to_test.keys())
-            print("Successfully passed test for '{0}' query".format(query_key))
-
-    @staticmethod
-    def check_result(query_key, query_actual_result, query_expected_result, queries_under_test):
-        """
-        A test for a single query result given the query reference and the actual result from executing it
-
-        :param query_key: the executed query reference, which is under test
-        :param query_actual_result: the result from the executed query
-        :param query_expected_result: the expected result for the executed query
-        :param queries_under_test: the set of query keys, which are being tested
-        """
-
-        assert query_key is not None, "The query key argument must be an existing object."
-        assert query_actual_result is not None, "The query's actual result argument must be an existing object."
-
-        assert query_key in queries_under_test, "The query reference {0} is not found in the queries under test.".format(query_key)
-
-        assert query_expected_result == query_actual_result, "Simulation test failure"
-
-    @staticmethod
-    def send_query(url, query):
-        """
-        An auxiliary static method to send a query to a url and retrieve the result
-
-        :param url: the target url to which the query is sent to - a string containing a valid URL address
-        :param query: the query to be executed on the given URL
-        :return: the result of the executed query
-        """
-
-        query = urlencode({"q": query}).encode("ascii")
-        request = Request("{0}/query".format(url), query)
-        result = urlopen(request)
-
-        return result.read().decode("utf-8").strip()
-
-    @staticmethod
-    @fixture(scope='class')
-    def queries_to_test():
-        """
-        A pytest fixture used to read the queries, which would be tested, from a JSON file
-        :return: a JSON object represented as a python dictionary
-        """
-
-        to_read = join(dirname(__file__), "queryTests.json")
-        with open(to_read) as f:
-            return loads(f.read().replace('\n', '').replace("'", "\\\""))
diff --git a/test/streaming-sim/queryTests.json b/test/streaming-sim/queryTests.json
deleted file mode 100644
index b53b078133f464b9bc7e6e528926a2be12134eb0..0000000000000000000000000000000000000000
--- a/test/streaming-sim/queryTests.json
+++ /dev/null
@@ -1,22 +0,0 @@
-{
-  "cpu_usage": {
-    "query": "SELECT count(*) FROM 'CLMCMetrics'.'autogen'.'cpu_usage'",
-    "expected_result": "{'results':[{'statement_id':0,'series':[{'name':'cpu_usage','columns':['time','count_cpu_active_time','count_cpu_idle_time','count_cpu_usage'],'values':[['1970-01-01T00:00:00Z',7200,7200,7200]]}]}]}"
-  },
-  "ipendpoint_route": {
-    "query": "SELECT count(*) FROM 'CLMCMetrics'.'autogen'.'ipendpoint_route'",
-    "expected_result": "{'results':[{'statement_id':0,'series':[{'name':'ipendpoint_route','columns':['time','count_http_requests_fqdn_m','count_network_fqdn_latency'],'values':[['1970-01-01T00:00:00Z',7200,7200]]}]}]}"
-  },
-  "mpegdash_service": {
-    "query": "SELECT count(*) FROM 'CLMCMetrics'.'autogen'.'mpegdash_service'",
-    "expected_result": "{'results':[{'statement_id':0,'series':[{'name':'mpegdash_service','columns':['time','count_avg_response_time','count_peak_response_time','count_requests'],'values':[['1970-01-01T00:00:00Z',7200,7200,7200]]}]}]}"
-  },
-  "net_port_io": {
-    "query": "SELECT count(*) FROM 'CLMCMetrics'.'autogen'.'net_port_io'",
-    "expected_result": "{'results':[{'statement_id':0,'series':[{'name':'net_port_io','columns':['time','count_RX_BYTES_PORT_M','count_TX_BYTES_PORT_M'],'values':[['1970-01-01T00:00:00Z',7200,7200]]}]}]}"
-  },
-  "vm_res_alloc": {
-    "query": "SELECT count(*) FROM 'CLMCMetrics'.'autogen'.'vm_res_alloc'",
-    "expected_result": "{'results':[{'statement_id':0,'series':[{'name':'vm_res_alloc','columns':['time','count_cpu','count_memory','count_storage'],'values':[['1970-01-01T00:00:00Z',12,12,12]]}]}]}"
-  }
-}
\ No newline at end of file
diff --git a/test/streaming-sim/test_simresults.py b/test/streaming-sim/test_simresults.py
new file mode 100644
index 0000000000000000000000000000000000000000..66fb79a3442245ed5f845e385ed42a449359c80c
--- /dev/null
+++ b/test/streaming-sim/test_simresults.py
@@ -0,0 +1,53 @@
+#!/usr/bin/python3
+
+from urllib.parse import urlencode
+from urllib.request import Request, urlopen
+import pytest
+from StreamingSim import run_simulation_fixture
+
+
+class TestSimulation(object):
+    """
+    A testing class used to group all the tests related to the simulation data
+    """
+
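+    # each (query, expected_result) pair below becomes a separate test case -
+    # pytest runs test_simulation once per tuple in the parametrize list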
+    @pytest.mark.parametrize("query, expected_result", [
+        ("SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"cpu_usage\"", "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu_usage\",\"columns\":[\"time\",\"count_cpu_active_time\",\"count_cpu_idle_time\",\"count_cpu_usage\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200,7200]]}]}]}"),
+        ("SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"ipendpoint_route\"", "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"ipendpoint_route\",\"columns\":[\"time\",\"count_http_requests_fqdn_m\",\"count_network_fqdn_latency\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200]]}]}]}"),
+        ("SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"mpegdash_service\"", "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"mpegdash_service\",\"columns\":[\"time\",\"count_avg_response_time\",\"count_peak_response_time\",\"count_requests\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200,7200]]}]}]}"),
+        ("SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"net_port_io\"", "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"net_port_io\",\"columns\":[\"time\",\"count_RX_BYTES_PORT_M\",\"count_TX_BYTES_PORT_M\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200]]}]}]}"),
+        ("SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"vm_res_alloc\"", "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"vm_res_alloc\",\"columns\":[\"time\",\"count_cpu\",\"count_memory\",\"count_storage\"],\"values\":[[\"1970-01-01T00:00:00Z\",12,12,12]]}]}]}")
+    ])
+    def test_simulation(self, query, expected_result, run_simulation_fixture):
+        """
+        This is the entry point of the test. This method will be found and executed when the module is run using pytest
+
+        :param query: the query to execute (value obtained from the pytest parameter decorator)
+        :param expected_result: the result expected from executing the query (value obtained from the pytest parameter decorator)
+        :param run_simulation_fixture: the imported fixture to use to generate the testing data - the return value of the fixture is not needed in this case
+        """
+
+        print("\n")  # prints a blank line for formatting purposes
+
+        # pytest automatically goes through all queries under test, declared in the parametrize decorator
+        actual_result = self.send_query("http://localhost:8086", query)
+
+        assert expected_result == actual_result, "Simulation test failure"
+
+        print("Successfully passed test for the following query: {0}".format(query))
+
+    @staticmethod
+    def send_query(url, query):
+        """
+        An auxiliary static method to send a query to a url and retrieve the result
+
+        :param url: the target URL to which the query is sent - a string containing a valid URL address
+        :param query: the query to be executed on the given URL
+        :return: the result of the executed query
+        """
+
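+        # the query is form-encoded as 'q=<query string>' and POSTed to InfluxDB's /query endpoint;
+        # the raw JSON response body is returned as a stripped string for verbatim comparison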
+        query = urlencode({"q": query}).encode("ascii")
+        request = Request("{0}/query".format(url), query)
+        result = urlopen(request)
+
+        return result.read().decode("utf-8").strip()