diff --git a/README.md b/README.md
index d141181064efae3c4d097a839d7ae0af05d235ba..5407eb93cd089fcd50bcf3d4c838021c463f8809 100644
--- a/README.md
+++ b/README.md
@@ -93,6 +93,46 @@ SSH into the CLMC server
 
 `vagrant --fixture=streaming-sim -- ssh clmc-service`
 
+In the queryTests.json file, define the queries under test along with the expected result for each query. The format
+is straightforward:
+```json
+{
+"[queryReference]" : {
+    "query" : "[queryToBeExecuted]",
+    "result" : "[queryExpectedResult]"
+  }
+}
+```
+where "[queryReference]" , "[queryToBeExecuted]" and "[queryExpectedResult]" are replaced with the values for testing.
+Another thing to note is that instead of escaping double quotes in the testing values, one can use single quotes and the python
+script would automatically adjust these when reading the json file. E.g.
+Instead of this:
+```json
+{
+"query": "SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"cpu_usage\""
+}
+```
+use this:
+```json
+{
+"query": "SELECT count(*) FROM 'CLMCMetrics'.'autogen'.'cpu_usage'"
+}
+```
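+
+Under the hood, the verification script applies exactly this substitution before parsing the file. A minimal sketch of
+the idea (the full implementation is the `queries_to_test` fixture in `test/streaming-sim/VerifySimResults.py`):
+```python
+from json import loads
+
+with open("queryTests.json") as f:
+    # swap each single quote for an escaped double quote, then parse the text as JSON
+    queries = loads(f.read().replace("'", "\\\""))
+```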
+
 Run a python script to generate the test data sets
 
 `python3 /vagrant/test/streaming-sim/StreamingSim.py`
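+
+Once the data sets have been generated, the verification script (now a pytest module) can be run through pytest,
+assuming pytest is installed on the CLMC server:
+
+`pytest /vagrant/test/streaming-sim/VerifySimResults.py`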
diff --git a/test/streaming-sim/VerifySimResults.py b/test/streaming-sim/VerifySimResults.py
index b68054b2504dc813ebe8ed2e8a00515f206f523f..dd71e5e375a10b6f552d65c4dcf4e26edcff23aa 100644
--- a/test/streaming-sim/VerifySimResults.py
+++ b/test/streaming-sim/VerifySimResults.py
@@ -1,5 +1,8 @@
 from urllib.parse import urlencode
 from urllib.request import Request, urlopen
+from json import loads
+from os.path import join, dirname
+import pytest
 
 
 class TestSimulation(object):
@@ -7,50 +10,41 @@ class TestSimulation(object):
     A 'testing' class used to group all the tests related to the simulation data
     """
 
-    # a class variable storing references between keywords and queries, which will be tested
-    query_reference = {
-        "cpu_usage": "SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"cpu_usage\"",
-        "ipendpoint_route": "SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"ipendpoint_route\"",
-        "mpegdash_service": "SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"mpegdash_service\"",
-        "net_port_io": "SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"net_port_io\"",
-        "vm_res_alloc": "SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"vm_res_alloc\""
-    }
-
-    # a class variable storing references between keywords and results expected when executing the queries under test
-    result_reference = {
-        "cpu_usage": "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu_usage\",\"columns\":[\"time\",\"count_cpu_active_time\",\"count_cpu_idle_time\",\"count_cpu_usage\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200,7200]]}]}]}",
-        "ipendpoint_route": "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"ipendpoint_route\",\"columns\":[\"time\",\"count_http_requests_fqdn_m\",\"count_network_fqdn_latency\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200]]}]}]}",
-        "mpegdash_service": "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"mpegdash_service\",\"columns\":[\"time\",\"count_avg_response_time\",\"count_peak_response_time\",\"count_requests\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200,7200]]}]}]}",
-        "net_port_io": "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"net_port_io\",\"columns\":[\"time\",\"count_RX_BYTES_PORT_M\",\"count_TX_BYTES_PORT_M\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200]]}]}]}",
-        "vm_res_alloc": "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"vm_res_alloc\",\"columns\":[\"time\",\"count_cpu\",\"count_memory\",\"count_storage\"],\"values\":[[\"1970-01-01T00:00:00Z\",12,12,12]]}]}]}"
-    }
-
-    def test_simulation(self):
+    def test_simulation(self, queries_to_test):
         """
         This is the entry point of the test. This method will be found and executed when the module is run using pytest
+
+        :param queries_to_test: the fixture to use, which returns the parsed JSON queries as a Python dictionary
         """
 
-        for query_key in self.query_reference:
-            query = self.query_reference[query_key]
-            query_result = self.send_query("http://localhost:8086", query)
+        print()  # print a blank line for formatting purposes
+
+        # go through all query references in the dictionary and execute a test for each of them
+        for query_key in queries_to_test:
+            query_to_execute = queries_to_test[query_key]["query"]
+            query_actual_result = self.send_query("http://localhost:8086", query_to_execute)
+            query_expected_result = queries_to_test[query_key]["expected_result"]
 
-            self.check_result(query_key, query_result)
+            self.check_result(query_key, query_actual_result, query_expected_result, queries_to_test.keys())
             print("Successfully passed test for '{0}' query".format(query_key))
 
-    def check_result(self, query, query_result):
+    @staticmethod
+    def check_result(query_key, query_actual_result, query_expected_result, queries_under_test):
         """
         A test for a single query result given the query reference and the actual result from executing it
 
-        :param query: the executed query reference, which is under test
-        :param query_result: the result from the executed query
+        :param query_key: the executed query reference, which is under test
+        :param query_actual_result: the result from the executed query
+        :param query_expected_result: the expected result for the executed query
+        :param queries_under_test: the set of query keys under test
         """
 
-        assert query is not None, "The query argument must be an existing object."
-        assert query_result is not None, "The query result argument must be an existing object."
+        assert query_key is not None, "The query key argument must be an existing object."
+        assert query_actual_result is not None, "The query's actual result argument must be an existing object."
 
-        assert query in self.query_reference, "The query reference {0} is not found in the queries under test.".format(query)
+        assert query_key in queries_under_test, "The query reference {0} is not found in the queries under test.".format(query_key)
 
-        assert self.result_reference[query] == query_result, "Simulation test failure"
+        assert query_expected_result == query_actual_result, "Simulation test failure"
 
     @staticmethod
     def send_query(url, query):
@@ -67,3 +61,17 @@ class TestSimulation(object):
         result = urlopen(request)
 
         return result.read().decode("utf-8").strip()
+
+    @staticmethod
+    @pytest.fixture
+    def queries_to_test():
+        """
+        A pytest fixture that reads the queries under test from the queryTests.json file
+
+        :return: the parsed JSON content as a Python dictionary
+        """
+
+        to_read = join(dirname(__file__), "queryTests.json")
+        with open(to_read) as f:
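+            # strip newlines and replace each single quote with an escaped double quote so the values parse as valid JSON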
+            return loads(f.read().replace('\n', '').replace("'", "\\\""))
diff --git a/test/streaming-sim/queryTests.json b/test/streaming-sim/queryTests.json
new file mode 100644
index 0000000000000000000000000000000000000000..b53b078133f464b9bc7e6e528926a2be12134eb0
--- /dev/null
+++ b/test/streaming-sim/queryTests.json
@@ -0,0 +1,22 @@
+{
+  "cpu_usage": {
+    "query": "SELECT count(*) FROM 'CLMCMetrics'.'autogen'.'cpu_usage'",
+    "expected_result": "{'results':[{'statement_id':0,'series':[{'name':'cpu_usage','columns':['time','count_cpu_active_time','count_cpu_idle_time','count_cpu_usage'],'values':[['1970-01-01T00:00:00Z',7200,7200,7200]]}]}]}"
+  },
+  "ipendpoint_route": {
+    "query": "SELECT count(*) FROM 'CLMCMetrics'.'autogen'.'ipendpoint_route'",
+    "expected_result": "{'results':[{'statement_id':0,'series':[{'name':'ipendpoint_route','columns':['time','count_http_requests_fqdn_m','count_network_fqdn_latency'],'values':[['1970-01-01T00:00:00Z',7200,7200]]}]}]}"
+  },
+  "mpegdash_service": {
+    "query": "SELECT count(*) FROM 'CLMCMetrics'.'autogen'.'mpegdash_service'",
+    "expected_result": "{'results':[{'statement_id':0,'series':[{'name':'mpegdash_service','columns':['time','count_avg_response_time','count_peak_response_time','count_requests'],'values':[['1970-01-01T00:00:00Z',7200,7200,7200]]}]}]}"
+  },
+  "net_port_io": {
+    "query": "SELECT count(*) FROM 'CLMCMetrics'.'autogen'.'net_port_io'",
+    "expected_result": "{'results':[{'statement_id':0,'series':[{'name':'net_port_io','columns':['time','count_RX_BYTES_PORT_M','count_TX_BYTES_PORT_M'],'values':[['1970-01-01T00:00:00Z',7200,7200]]}]}]}"
+  },
+  "vm_res_alloc": {
+    "query": "SELECT count(*) FROM 'CLMCMetrics'.'autogen'.'vm_res_alloc'",
+    "expected_result": "{'results':[{'statement_id':0,'series':[{'name':'vm_res_alloc','columns':['time','count_cpu','count_memory','count_storage'],'values':[['1970-01-01T00:00:00Z',12,12,12]]}]}]}"
+  }
+}
\ No newline at end of file