diff --git a/src/service/clmcservice/graphapi/conftest.py b/src/service/clmcservice/graphapi/conftest.py
index 4e00b16894c37235e7a152fd7df2ffa924534690..e36ac06f72af6e96dc17dcb2ec5cee5efb611aee 100644
--- a/src/service/clmcservice/graphapi/conftest.py
+++ b/src/service/clmcservice/graphapi/conftest.py
@@ -27,6 +27,8 @@ from influxdb import InfluxDBClient
 from clmcservice.generate_network_measurements import report_network_measurements
 from py2neo import Graph
 
+
+# static network configuration data used for testing cases
 network_config = {
     "bandwidth": 104857600,
     "links": [
@@ -121,32 +123,37 @@ network_config = {
 @pytest.fixture(scope='module', autouse=True)
 def db_testing_data():
     """
-    This fixture generates some testing data in influx to be used for testing, after which it clears up the DB.
+    This fixture generates testing data in influx to be used in the various test methods, after which it clears up the neo4j and influx databases.
 
-    :return: a pair of time stamps defining the from-to range for which the test data is reported
+    :return: pair of time stamps - the from-to range of the generated influx test data, test database name and the graph db client object
     """
 
     global network_config
 
     test_db_name = "TestInfluxDB"
 
+    # ASSUMES both Influx and Neo4j are running on localhost with default ports
     influx = InfluxDBClient(host="localhost", port=8086, timeout=10)
     graph = Graph(host="localhost", password="admin")
-    graph.delete_all()
+    graph.delete_all()  # clear the graph db before testing
 
+    # create the physical infrastructure subgraph
     dbs = influx.get_list_database()
     if "CLMCMetrics" not in dbs:
         influx.create_database("CLMCMetrics")
     report_network_measurements("localhost", "CLMCMetrics", network_config, "localhost", "admin")
 
+    # drop the test DB in influx if it already exists, then (re)create it and switch to it
     if test_db_name in dbs:
         influx.drop_database(test_db_name)
     influx.create_database(test_db_name)
     influx.switch_database(test_db_name)
 
+    # time range for which the data is reported
     from_timestamp = 1528385860
     to_timestamp = 1528685860
 
+    # nginx data to report to influx
     data = [
         ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 5, 20, 1500, 15000, 1528385860),
         ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 8, 35, 1000, 11000, 1528385860),
@@ -167,6 +174,7 @@ def db_testing_data():
          } for host, endpoint, location, sf, sf_i, sfc, sfc_i, sr, num_requests, processing_time, request_size, response_size, timestamp in data
     ])
 
+    # minio data to report to influx
     data = [
         ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 12, 86, 101000, 4700, 1528386860),
         ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 15, 75, 96000, 6300, 1528386860),
@@ -187,6 +195,7 @@ def db_testing_data():
          } for host, endpoint, location, sf, sf_i, sfc, sfc_i, sr, num_requests, processing_time, request_size, response_size, timestamp in data
     ])
 
+    # apache data to report to influx
     data = [
         ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 15, 1400, 15600, 1528386860),
         ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 17, 2200, 11200, 1528388860),
@@ -204,6 +213,7 @@ def db_testing_data():
 
     yield from_timestamp, to_timestamp, test_db_name, graph
 
+    # clean up after the test is over - delete the test databases and clear up the graph
     influx.drop_database("CLMCMetrics")
     influx.drop_database("TestInfluxDB")
     graph.delete_all()
diff --git a/src/service/clmcservice/graphapi/tests.py b/src/service/clmcservice/graphapi/tests.py
index 1aacb5c5c6e4c2450ac1e4b0cb0a177290869727..9dcd23eaa93819f0743dcab77756969ac53d9fe5 100644
--- a/src/service/clmcservice/graphapi/tests.py
+++ b/src/service/clmcservice/graphapi/tests.py
@@ -76,13 +76,14 @@ class TestGraphAPI(object):
     ])
     def test_build_error_handling(self, body, from_timestamp, to_timestamp, error_msg):
         """
-        Tests the error handling of the graph build API endpoint.
+        Tests the error handling of the graph build API endpoint by passing erroneous input and confirming an HTTPBadRequest was returned.
 
         :param body: body of the request to test
         :param from_timestamp: the 'from' URL param
         :param to_timestamp: the 'to' URL param
-        :param error_msg: the error message to pass in case of an error
+        :param error_msg: the error message to pass in case of an error not being properly handled by the API endpoint (in other words, a test failure)
         """
+
         request = testing.DummyRequest()
         if body is not None:
             request.body = body
@@ -100,12 +101,12 @@ class TestGraphAPI(object):
 
     def test_build(self, db_testing_data):
         """
-        Tests the graph build API endpoint
+        Tests the graph build API endpoint - it makes 2 API calls and checks that the expected graph was created (the influx data that's being used is reported to InfluxDB in the conftest file)
 
-        :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data, test database name and the graph db client
+        :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data, test database name and the graph db client object (this is a fixture from conftest)
         """
 
-        global graph_1_id, graph_2_id
+        global graph_1_id, graph_2_id  # these variables are used to store the ID of the graphs that were created during the execution of this test method; they are reused later when testing the delete method
 
         from_timestamp, to_timestamp, test_db_name, graph_db = db_testing_data
 
@@ -131,6 +132,7 @@ class TestGraphAPI(object):
             error_raised = True
         assert error_raised, "A bad request error must have been raised in case of invalid URL parameters."
 
+        # Create a valid build request and send it to the API endpoint
         service_functions = dict(nginx={"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)",
                                         "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"},
                                  minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
@@ -181,14 +183,15 @@ class TestGraphAPI(object):
             ), graph_db, request_id
         )
 
-        # check endpoint nodes response time property
+        # check endpoint nodes have the correct properties
         for endpoint, response_time, request_size, response_size in (("minio_1_ep1", 9, 5760, 2033), ("nginx_1_ep1", 18.2, 2260, 9660), ("nginx_1_ep2", 22.2, 35600, 6420)):
             endpoint_node = graph_db.nodes.match("Endpoint", name=endpoint, uuid=request_id).first()
             assert endpoint_node["response_time"] == response_time, "Wrong response time property of endpoint node"
+            # approximation is used to avoid long float numbers retrieved from influx, the test case ensures the results are different enough so that approximation of +-1 is good enough for testing
             assert endpoint_node["request_size"] == pytest.approx(request_size, 1), "Wrong request size attribute of endpoint node"
             assert endpoint_node["response_size"] == pytest.approx(response_size, 1), "Wrong response size attribute of endpoint node"
 
-        # send a new request for a new service function chain
+        # send a new request for a new service function chain and check the new subgraph has been created
         service_functions = dict(minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
                                         "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"},
                                  apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)",
@@ -206,6 +209,7 @@ class TestGraphAPI(object):
         request_id = graph_subresponse["uuid"]
         graph_2_id = request_id
 
+        # check the new nodes have been created
         assert graph_db.nodes.match("ServiceFunction", name="apache").first() is not None, "Service function apache must have been added to the graph"
 
         for sf_i in ("apache_1", "minio_2"):
@@ -237,18 +241,19 @@ class TestGraphAPI(object):
             ), graph_db, request_id
         )
 
-        # check endpoint nodes response time property
+        # check endpoint nodes have the correct properties
         for endpoint, response_time, request_size, response_size in (("minio_2_ep1", 7, 2998, 3610), ("apache_1_ep1", 17.6, 1480, 7860)):
             endpoint_node = graph_db.nodes.match("Endpoint", name=endpoint, uuid=request_id).first()
             assert endpoint_node["response_time"] == response_time, "Wrong response time property of endpoint node"
+            # approximation is used to avoid long float numbers retrieved from influx, the test case ensures the results are different enough so that approximation of +-1 is good enough for testing
             assert endpoint_node["request_size"] == pytest.approx(request_size, 1), "Wrong request size attribute of endpoint node"
             assert endpoint_node["response_size"] == pytest.approx(response_size, 1), "Wrong response size attribute of endpoint node"
 
     def test_delete(self, db_testing_data):
         """
-        Tests the delete API endpoint of the Graph API
+        Tests the delete API endpoint of the Graph API - this test depends on the build test having passed successfully, so that graph_1_id and graph_2_id have been set
 
-        :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data, test database name and the graph db client
+        :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data, test database name and the graph db client object (this is a fixture from conftest)
         """
 
         global graph_1_id, graph_2_id
@@ -264,11 +269,13 @@ class TestGraphAPI(object):
             error_raised = True
         assert error_raised, "HTTP Not Found error must be raised in case of unrecognized subgraph ID"
 
+        # delete the graph associated with graph_1_id
         request = testing.DummyRequest()
         request.matchdict["graph_id"] = graph_1_id
         response = GraphAPI(request).delete_temporal_graph()
         assert response == {"uuid": graph_1_id, "deleted": 4}, "Incorrect response when deleting temporal graph"
 
+        # delete the graph associated with graph_2_id
         request = testing.DummyRequest()
         request.matchdict["graph_id"] = graph_2_id
         response = GraphAPI(request).delete_temporal_graph()
@@ -288,12 +295,12 @@ class TestGraphAPI(object):
     ])
     def test_rtt_error_handling(self, graph_id, endpoint, compute_node, error_type, error_msg):
         """
-        Tests the error handling of the graph round trip time API endpoint.
+        Tests the error handling of the graph round trip time API endpoint - achieved by sending erroneous input in the request and verifying the appropriate error type has been returned.
 
         :param graph_id: the UUID of the subgraph
         :param endpoint: endpoint ID
         :param compute_node: compute node ID
-        :param error_type: error type to expect
+        :param error_type: error type to expect as a response
         :param error_msg: error message in case of a test failure
         """
 
@@ -312,13 +319,14 @@ class TestGraphAPI(object):
 
     def test_rtt(self, db_testing_data):
         """
-        Tests the rtt API endpoint of the Graph API
+        Tests the rtt API endpoint of the Graph API.
 
-        :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data, test database name and the graph db client
+        :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data, test database name and the graph db client object (this is a fixture from conftest)
         """
 
         from_timestamp, to_timestamp, test_db_name, graph_db = db_testing_data
 
+        # create a graph to use for RTT test by using the build API endpoint
         service_functions = dict(nginx={"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)",
                                         "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"},
                                  minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
@@ -335,6 +343,7 @@ class TestGraphAPI(object):
         assert graph_subresponse.get("uuid") is not None, "Request UUID must be attached to the response."
         request_id = graph_subresponse["uuid"]
 
+        # test some more error case handling of the RTT API endpoint
         request = testing.DummyRequest()
         request.matchdict["graph_id"] = request_id
         request.params["endpoint"] = "nginx_1_ep1"
@@ -368,6 +377,7 @@ class TestGraphAPI(object):
             error_raised = True
         assert error_raised, "HTTP Not Found error must be thrown for a non existing endpoint"
 
+        # go through the set of input/output (expected) parameters and assert actual results match with expected ones
         for dc, endpoint, forward_latencies, reverse_latencies, response_time, request_size, response_size, rtt, global_tags in (
             ("DC6", "nginx_1_ep2", [], [], 22.2, 35600, 6420, 22.2, {"location": "DC6", "sr": "sr6", "ipendpoint": "nginx_1_ep2", "host": "host2", "sfc": "test_sfc1", "sfc_i": "test_sfc1_1", "sf": "nginx", "sf_i": "nginx_1"}),
             ("DC2", "nginx_1_ep2", [11, 15, 4.5], [5.5, 13, 7.5], 22.2, 35600, 6420, 78, {"location": "DC6", "sr": "sr6", "ipendpoint": "nginx_1_ep2", "host": "host2", "sfc": "test_sfc1", "sfc_i": "test_sfc1_1", "sf": "nginx", "sf_i": "nginx_1"}),
@@ -378,12 +388,13 @@ class TestGraphAPI(object):
             request.params["endpoint"] = endpoint
             request.params["compute_node"] = dc
             response = GraphAPI(request).run_rtt_query()
+            # approximation is used to avoid long float numbers retrieved from influx, the test case ensures the results are different enough so that approximation of +-1 is good enough for testing
             assert response.pop("round_trip_time") == pytest.approx(rtt, 1), "Incorrect RTT response"
             assert response == {"forward_latencies": forward_latencies, "reverse_latencies": reverse_latencies, "total_forward_latency": sum(forward_latencies), "total_reverse_latency": sum(reverse_latencies),
                                 "bandwidth": 104857600, "response_time": response_time, "global_tags": global_tags,
                                 "request_size": request_size, "response_size": response_size}, "Incorrect RTT response"
 
-        # send a new request for a new service function chain
+        # send a new request for a new service function chain to create a second subgraph to test
         service_functions = dict(minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
                                         "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"},
                                  apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)",
@@ -400,6 +411,7 @@ class TestGraphAPI(object):
         assert graph_subresponse.get("uuid") is not None, "Request UUID must be attached to the response."
         request_id = graph_subresponse["uuid"]
 
+        # go through the set of input/output (expected) parameters and assert actual results match with expected ones
         for dc, endpoint, forward_latencies, reverse_latencies, response_time, request_size, response_size, rtt, global_tags in (
             ("DC5", "apache_1_ep1", [], [], 17.6, 1480, 7860, 17.6, {"location": "DC5", "sr": "sr5", "ipendpoint": "apache_1_ep1", "host": "host5", "sfc": "test_sfc2", "sfc_i": "test_sfc2_1", "sf": "apache", "sf_i": "apache_1"}),
             ("DC5", "minio_2_ep1", [], [], 7, 2998, 3610, 7, {"location": "DC5", "sr": "sr5", "ipendpoint": "minio_2_ep1", "host": "host4", "sfc": "test_sfc2", "sfc_i": "test_sfc2_1", "sf": "minio", "sf_i": "minio_2"}),
@@ -411,6 +423,7 @@ class TestGraphAPI(object):
             request.params["endpoint"] = endpoint
             request.params["compute_node"] = dc
             response = GraphAPI(request).run_rtt_query()
+            # approximation is used to avoid long float numbers retrieved from influx, the test case ensures the results are different enough so that approximation of +-1 is good enough for testing
             assert response.pop("request_size") == pytest.approx(request_size, 1), "Incorrect RTT response"
             assert response.pop("response_size") == pytest.approx(response_size, 1), "Incorrect RTT response"
             assert response.pop("round_trip_time") == pytest.approx(rtt, 1), "Incorrect RTT response"
@@ -420,7 +433,7 @@ class TestGraphAPI(object):
     @staticmethod
     def check_exist_relationship(relationships_tuple, graph, uuid):
         """
-        Iterates through a tuple of relation ships and checks that each of those exists.
+        Iterates through a tuple of relationships and checks that each of those exists - a utility method to be reused for testing.
 
         :param relationships_tuple: the tuple to iterate
         :param graph: the graph object
@@ -433,12 +446,12 @@ class TestGraphAPI(object):
                 from_node = graph.nodes.match(from_node_type, name=from_node_name, uuid=uuid).first()
             else:
                 from_node = graph.nodes.match(from_node_type, name=from_node_name).first()
-            assert from_node is not None
+            assert from_node is not None  # IMPORTANT, assert the from_node exists, otherwise the py2neo RelationshipMatcher object assumes you are looking for any node (instead of raising an error)
 
             if to_node_type == "Endpoint":
                 to_node = graph.nodes.match(to_node_type, name=to_node_name, uuid=uuid).first()
             else:
                 to_node = graph.nodes.match(to_node_type, name=to_node_name).first()
-            assert to_node is not None
+            assert to_node is not None  # IMPORTANT, assert the to_node exists, otherwise the py2neo RelationshipMatcher object assumes you are looking for any node (instead of raising an error)
 
             assert graph.relationships.match(nodes=(from_node, to_node), r_type=relationship_type).first() is not None, "Graph is missing a required relationship"