diff --git a/test/streaming-sim/VerifySimResults.py b/test/streaming-sim/VerifySimResults.py
index 2060a23d578d2b8cb278f9678a98b8e8430c92d5..b68054b2504dc813ebe8ed2e8a00515f206f523f 100644
--- a/test/streaming-sim/VerifySimResults.py
+++ b/test/streaming-sim/VerifySimResults.py
@@ -1,66 +1,72 @@
-import sys
-import urllib.parse
-import urllib.request
-
-queryReference = {
-    "cpu_usage" : "SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"cpu_usage\"",
-    "ipendpoint_route" : "SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"ipendpoint_route\"",
-    "mpegdash_service" : "SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"mpegdash_service\"",
-    "net_port_io" : "SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"net_port_io\"",
-    "vm_res_alloc" : "SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"vm_res_alloc\""
-}
-
-resultReference = { 
-    "cpu_usage" : "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu_usage\",\"columns\":[\"time\",\"count_cpu_active_time\",\"count_cpu_idle_time\",\"count_cpu_usage\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200,7200]]}]}]}",
-    "ipendpoint_route" : "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"ipendpoint_route\",\"columns\":[\"time\",\"count_http_requests_fqdn_m\",\"count_network_fqdn_latency\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200]]}]}]}",
-    "mpegdash_service" : "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"mpegdash_service\",\"columns\":[\"time\",\"count_avg_response_time\",\"count_peak_response_time\",\"count_requests\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200,7200]]}]}]}",
-    "net_port_io" : "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"net_port_io\",\"columns\":[\"time\",\"count_RX_BYTES_PORT_M\",\"count_TX_BYTES_PORT_M\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200]]}]}]}",
-    "vm_res_alloc" : "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"vm_res_alloc\",\"columns\":[\"time\",\"count_cpu\",\"count_memory\",\"count_storage\"],\"values\":[[\"1970-01-01T00:00:00Z\",12,12,12]]}]}]}"
-}
-
-def checkResult( query, queryResult ):
-    result = False
-
-    if query != None and queryResult != None:
-        if ( query in resultReference ):
-            if ( resultReference[query] == queryResult ):            
-                print ( "Result correct" )
-                result = True
-            else:
-                print ( "Incorrect result for query: " + query )
-                print ( "Expected = " + resultReference[query] )
-                print ( "Result   = " + queryResult )
-        else:
-            print( "Could not find query result for: " + query )
-    else:
-        print( "Could not check result: invalid parameters" )
-
-    return result
-
-def sendInfluxQuery( url, query ):
-    query = urllib.parse.urlencode( {'q': query} )
-    query = query.encode( 'ascii' )
-    req = urllib.request.Request( url + '/query ', query )
-    result = urllib.request.urlopen( req )
-
-    return result.read().decode("utf-8").strip()
-
-# Entry point
-# ---------------------------------------------------------------------------------------
-testFailed = False
-
-for key in list( queryReference ):
-    query = queryReference[key]
-    result = sendInfluxQuery( "http://localhost:8086", query )
-
-    if checkResult( key, result ) == False:
-        testFailed = True
-        break
-
-if testFailed :
-    print( "Failed simulation result test" )
-    sys.exit( 1 )
-else:
-    print( "Test succeeded" )
-
-sys.exit( 0 )
\ No newline at end of file
+from urllib.parse import urlencode
+from urllib.request import Request, urlopen
+
+
+class TestSimulation:
+    """
+    A test class used to group all the tests related to the simulation data
+    """
+
+    # a class variable storing references between keywords and queries, which will be tested
+    query_reference = {
+        "cpu_usage": "SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"cpu_usage\"",
+        "ipendpoint_route": "SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"ipendpoint_route\"",
+        "mpegdash_service": "SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"mpegdash_service\"",
+        "net_port_io": "SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"net_port_io\"",
+        "vm_res_alloc": "SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"vm_res_alloc\""
+    }
+
+    # a class variable storing references between keywords and results expected when executing the queries under test
+    result_reference = {
+        "cpu_usage": "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"cpu_usage\",\"columns\":[\"time\",\"count_cpu_active_time\",\"count_cpu_idle_time\",\"count_cpu_usage\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200,7200]]}]}]}",
+        "ipendpoint_route": "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"ipendpoint_route\",\"columns\":[\"time\",\"count_http_requests_fqdn_m\",\"count_network_fqdn_latency\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200]]}]}]}",
+        "mpegdash_service": "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"mpegdash_service\",\"columns\":[\"time\",\"count_avg_response_time\",\"count_peak_response_time\",\"count_requests\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200,7200]]}]}]}",
+        "net_port_io": "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"net_port_io\",\"columns\":[\"time\",\"count_RX_BYTES_PORT_M\",\"count_TX_BYTES_PORT_M\"],\"values\":[[\"1970-01-01T00:00:00Z\",7200,7200]]}]}]}",
+        "vm_res_alloc": "{\"results\":[{\"statement_id\":0,\"series\":[{\"name\":\"vm_res_alloc\",\"columns\":[\"time\",\"count_cpu\",\"count_memory\",\"count_storage\"],\"values\":[[\"1970-01-01T00:00:00Z\",12,12,12]]}]}]}"
+    }
+
+    def test_simulation(self):
+        """
+        The entry point of the test. This method is discovered and executed when the module is run with pytest
+        """
+
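+        # NOTE: assumes a local InfluxDB instance is reachable at http://localhost:8086 and that
+        # the simulation data has already been ingested into the "CLMCMetrics" database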
+        for query_key in self.query_reference:
+            query = self.query_reference[query_key]
+            query_result = self.send_query("http://localhost:8086", query)
+
+            self.check_result(query_key, query_result)
+            print("Successfully passed test for '{0}' query".format(query_key))
+
+    def check_result(self, query, query_result):
+        """
+        Checks a single query result against its expected reference, given the query key and the actual result from executing the query
+
+        :param query: the reference key of the executed query under test
+        :param query_result: the result from the executed query
+        """
+
+        assert query is not None, "The query argument must be an existing object."
+        assert query_result is not None, "The query result argument must be an existing object."
+
+        assert query in self.query_reference, "The query reference {0} is not found in the queries under test.".format(query)
+
+        assert self.result_reference[query] == query_result, "Simulation test failure: result for query '{0}' does not match the expected reference".format(query)
+
+    @staticmethod
+    def send_query(url, query):
+        """
+        An auxiliary static method to send a query to a url and retrieve the result
+
+        :param url: the target URL to which the query is sent - a string containing a valid URL address
+        :param query: the query to be executed on the given URL
+        :return: the result of the executed query
+        """
+
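+        # passing a data payload to Request makes urllib issue an HTTP POST to the InfluxDB /query endpoint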
+        query = urlencode({"q": query}).encode("ascii")
+        request = Request("{0}/query".format(url), query)
+        result = urlopen(request)
+
+        return result.read().decode("utf-8").strip()