From 608a15181029ecfc7b9bd9f467ed62df5127832a Mon Sep 17 00:00:00 2001
From: Nikolay Stanchev <ns17@it-innovation.soton.ac.uk>
Date: Tue, 17 Jul 2018 15:43:12 +0100
Subject: [PATCH] Updates graph rtt API endpoint to also return calculated
 round trip time + updates tests

---
 src/service/clmcservice/__init__.py           |  9 +-
 .../generate_network_measurements.py          |  2 +-
 src/service/clmcservice/graphapi/conftest.py  | 63 ++++++-------
 src/service/clmcservice/graphapi/tests.py     | 88 ++++++++++++-------
 src/service/clmcservice/graphapi/utilities.py | 18 ++--
 src/service/clmcservice/graphapi/views.py     | 44 +++++++++-
 src/service/development.ini                   |  2 +
 src/service/network_config.json               |  1 +
 src/service/production.ini                    |  2 +
 9 files changed, 154 insertions(+), 75 deletions(-)

diff --git a/src/service/clmcservice/__init__.py b/src/service/clmcservice/__init__.py
index 843fd22..9f25df6 100644
--- a/src/service/clmcservice/__init__.py
+++ b/src/service/clmcservice/__init__.py
@@ -22,6 +22,8 @@
 //      Created for Project :   FLAME
 """
 
+from os import path
+from json import load
 from pyramid.config import Configurator
 from sqlalchemy import engine_from_config
 from clmcservice.models.meta import DBSession, Base
@@ -45,6 +47,11 @@ def main(global_config, **settings):
     settings[MALFORMED_FLAG] = False
     settings['influx_port'] = int(settings['influx_port'])  # the influx port setting must be converted to integer instead of a string
 
+    network_config_file_path = settings["network_configuration_path"]
+    with open(network_config_file_path) as f:
+        network = load(f)
+        settings["network_bandwidth"] = network["bandwidth"]
+
     config = Configurator(settings=settings)
 
     # add routes of the aggregator API
@@ -61,7 +68,7 @@ def main(global_config, **settings):
     config.add_route('config_sfc_instance', '/config/sf-chains/instance')
 
     # add routes of the GRAPH API
-    config.add_route('graph_build', '/graph/build')
+    config.add_route('graph_build', '/graph/temporal')
     config.add_route('graph_manage', '/graph/temporal/{graph_id}')
     config.add_route('graph_algorithms_rtt', '/graph/temporal/{graph_id}/round-trip-time')
 
diff --git a/src/service/clmcservice/generate_network_measurements.py b/src/service/clmcservice/generate_network_measurements.py
index bf36f9d..5d1a162 100644
--- a/src/service/clmcservice/generate_network_measurements.py
+++ b/src/service/clmcservice/generate_network_measurements.py
@@ -62,7 +62,7 @@ def report_network_measurements(influx_host, db_name, json_data, neo4j_host, neo
         # get the dictionary of result points; the next() function just gets the first element of the query results generator (we only expect one item in the generator)
         try:
             actual_result = next(result.get_points())
-            latency = actual_result.get("mean_average_response_ms")
+            latency = actual_result.get("mean_average_response_ms")/2
             if graph.relationships.match(nodes=(from_node, to_node), r_type="linkedTo").first() is None:
                 edge = Relationship(from_node, "linkedTo", to_node, latency=latency)
                 graph.create(edge)
diff --git a/src/service/clmcservice/graphapi/conftest.py b/src/service/clmcservice/graphapi/conftest.py
index df21ef5..4e00b16 100644
--- a/src/service/clmcservice/graphapi/conftest.py
+++ b/src/service/clmcservice/graphapi/conftest.py
@@ -28,6 +28,7 @@ from clmcservice.generate_network_measurements import report_network_measurement
 from py2neo import Graph
 
 network_config = {
+    "bandwidth": 104857600,
     "links": [
         {
             "source": "DC1",
@@ -147,58 +148,58 @@ def db_testing_data():
     to_timestamp = 1528685860
 
     data = [
-        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 5, 20, 1528385860),
-        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 8, 35, 1528385860),
-        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 7, 15, 1528389860),
-        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 10, 23, 1528389860),
-        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 12, 17, 1528395860),
-        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 15, 11, 1528395860),
-        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 17, 23, 1528485860),
-        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 19, 24, 1528485860),
-        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 11, 16, 1528545860),
-        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 20, 18, 1528545860)
+        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 5, 20, 1500, 15000, 1528385860),
+        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 8, 35, 1000, 11000, 1528385860),
+        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 7, 15, 2300, 10000, 1528389860),
+        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 10, 23, 98000, 1200, 1528389860),
+        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 12, 17, 2000, 7500, 1528395860),
+        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 15, 11, 1300, 6700, 1528395860),
+        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 17, 23, 3000, 8300, 1528485860),
+        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 19, 24, 76000, 1200, 1528485860),
+        ("host1", "nginx_1_ep1", "DC4", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr4", 11, 16, 2500, 7500, 1528545860),
+        ("host2", "nginx_1_ep2", "DC6", "nginx", "nginx_1", "test_sfc1", "test_sfc1_1", "sr6", 20, 18, 1700, 12000, 1528545860)
     ]
     influx.write_points([
         {"measurement": "nginx",
          "tags": {"host": host, "ipendpoint": endpoint, "location": location, "sf": sf, "sf_i": sf_i, "sfc": sfc, "sfc_i": sfc_i, "sr": sr},
-         "fields": {"requests": num_requests, "avg_processing_time": processing_time},
+         "fields": {"requests": num_requests, "avg_processing_time": processing_time, "avg_request_size": request_size, "avg_response_size": response_size},
          "time": timestamp * 10 ** 9
-         } for host, endpoint, location, sf, sf_i, sfc, sfc_i, sr, num_requests, processing_time, timestamp in data
+         } for host, endpoint, location, sf, sf_i, sfc, sfc_i, sr, num_requests, processing_time, request_size, response_size, timestamp in data
     ])
 
     data = [
-        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 12, 86, 1528386860),
-        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 15, 75, 1528386860),
-        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 7, 105, 1528388860),
-        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 12, 60, 1528388860),
-        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 11, 121, 1528410860),
-        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 12, 154, 1528410860),
-        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 14, 84, 1528412860),
-        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 5, 45, 1528412860),
-        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 7, 63, 1528414860),
-        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 16, 86, 1528414860)
+        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 12, 86, 101000, 4700, 1528386860),
+        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 15, 75, 96000, 6300, 1528386860),
+        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 7, 105, 5200, 89200, 1528388860),
+        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 12, 60, 76900, 2100, 1528388860),
+        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 11, 121, 99500, 3500, 1528410860),
+        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 12, 154, 2700, 111000, 1528410860),
+        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 14, 84, 1100, 4300, 1528412860),
+        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 5, 45, 1200, 3200, 1528412860),
+        ("host3", "minio_1_ep1", "DC4", "minio", "minio_1", "test_sfc1", "test_sfc1_1", "sr4", 7, 63, 87000, 2000, 1528414860),
+        ("host4", "minio_2_ep1", "DC5", "minio", "minio_2", "test_sfc2", "test_sfc2_1", "sr5", 16, 86, 3100, 94000, 1528414860)
     ]
     influx.write_points([
         {"measurement": "minio_http",
          "tags": {"host": host, "ipendpoint": endpoint, "location": location, "sf": sf, "sf_i": sf_i, "sfc": sfc, "sfc_i": sfc_i, "sr": sr},
-         "fields": {"total_requests_count": num_requests, "total_processing_time": processing_time},
+         "fields": {"total_requests_count": num_requests, "total_processing_time": processing_time, "total_requests_size": request_size, "total_response_size": response_size},
          "time": timestamp * 10 ** 9
-         } for host, endpoint, location, sf, sf_i, sfc, sfc_i, sr, num_requests, processing_time, timestamp in data
+         } for host, endpoint, location, sf, sf_i, sfc, sfc_i, sr, num_requests, processing_time, request_size, response_size, timestamp in data
     ])
 
     data = [
-        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 15, 1528386860),
-        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 17, 1528388860),
-        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 19, 1528410860),
-        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 24, 1528412860),
-        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 13, 1528414860),
+        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 15, 1400, 15600, 1528386860),
+        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 17, 2200, 11200, 1528388860),
+        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 19, 700, 5700, 1528410860),
+        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 24, 1900, 4300, 1528412860),
+        ("host5", "apache_1_ep1", "DC5", "apache", "apache_1", "test_sfc2", "test_sfc2_1", "sr5", 13, 1200, 2500, 1528414860),
     ]
     influx.write_points([
         {"measurement": "apache",
          "tags": {"host": host, "ipendpoint": endpoint, "location": location, "sf": sf, "sf_i": sf_i, "sfc": sfc, "sfc_i": sfc_i, "sr": sr},
-         "fields": {"avg_processing_time": processing_time},
+         "fields": {"avg_processing_time": processing_time, "avg_request_size": request_size, "avg_response_size": response_size},
          "time": timestamp * 10 ** 9
-         } for host, endpoint, location, sf, sf_i, sfc, sfc_i, sr, processing_time, timestamp in data
+         } for host, endpoint, location, sf, sf_i, sfc, sfc_i, sr, processing_time, request_size, response_size, timestamp in data
     ])
 
     yield from_timestamp, to_timestamp, test_db_name, graph
diff --git a/src/service/clmcservice/graphapi/tests.py b/src/service/clmcservice/graphapi/tests.py
index d5c4554..1aacb5c 100644
--- a/src/service/clmcservice/graphapi/tests.py
+++ b/src/service/clmcservice/graphapi/tests.py
@@ -45,7 +45,7 @@ class TestGraphAPI(object):
         """
 
         self.registry = testing.setUp()
-        self.registry.add_settings({"neo4j_host": "localhost", "neo4j_password": "admin", "influx_host": "localhost", "influx_port": 8086})
+        self.registry.add_settings({"neo4j_host": "localhost", "neo4j_password": "admin", "influx_host": "localhost", "influx_port": 8086, "network_bandwidth": 104857600})
 
         yield
 
@@ -57,21 +57,21 @@ class TestGraphAPI(object):
         ('{"database": "CLMCMetrics", "retention_policy": "autogen", "service_function_chain_instance": "sfc_i"}', 12341412, 1234897, "A bad request error must have been raised in case of invalid request body."),
         ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": "{invalid_json}"}', 1528386860, 1528389860, "A bad request error must have been raised in case of invalid request body."),
         ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": ["nginx", "minio"]}', 1528386860, 1528389860, "A bad request error must have been raised in case of invalid request body."),
-        ('{"retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)"}, "minio": {"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)"}, "apache": {"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)"}}}',
+        ('{"retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"}}}',
          1528386860, 1528389860, "A bad request error must have been raised in case of missing database value in the request body"),
-        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_id", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)"}, "minio": {"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)"}, "apache": {"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)"}}}',
+        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_id", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"}}}',
          1528386860, 1528389860, "A bad request error must have been raised in case of invalid sfc_i ID in the request body"),
-        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "testsfc1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)"}, "minio": {"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)"}}}',
+        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "testsfc1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"}}}',
          1528386860, 1528389860, "A bad request error must have been raised in case of invalid sfc_i ID in the request body"),
-        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)"}, "minio": {"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)"}, "apache": {"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)"}}}',
+        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"}}}',
          "not a timestamp", "not a timestamp", "A bad request error must have been raised in case of invalid URL parameters."),
-        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)"}, "minio": {"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)"}, "apache": {"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)"}}}',
+        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"}}}',
          None, "not a timestamp", "A bad request error must have been raised in case of invalid URL parameters."),
-        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)"}, "minio": {"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)"}, "apache": {"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)"}}}',
+        ('{"database": "TestInfluxDB", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"}}}',
          2131212, None, "A bad request error must have been raised in case of invalid URL parameters."),
-        ('{"database": "DB-not-exists", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)"}, "minio": {"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)"}, "apache": {"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)"}}}',
+        ('{"database": "DB-not-exist", "retention_policy": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"}}}',
          2131212, 2131212, "A bad request error must have been raised in case of a non-existing database."),
-        ('{"database": "TestInfluxDB", "retention_policy-invalid": "autogen", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)"}, "minio": {"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)"}, "apache": {"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)"}}}',
+        ('{"database": "TestInfluxDB", "retention_policy": "autogen-invalid", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"}}}',
          2131212, 2131212, "A bad request error must have been raised in case of a non-existing retention policy."),
     ])
     def test_build_error_handling(self, body, from_timestamp, to_timestamp, error_msg):
@@ -113,9 +113,12 @@ class TestGraphAPI(object):
         assert dc_nodes == set("DC" + str(i) for i in range(1, 7)), "Compute nodes must have been created by the db_testing_data fixture"
 
         # test with invalid URL parameters naming
-        service_functions = dict(nginx={"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)"},
-                                 minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)"},
-                                 apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)"})
+        service_functions = dict(nginx={"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)",
+                                        "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"},
+                                 minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
+                                        "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"},
+                                 apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)",
+                                         "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"})
         body = dumps(dict(database=test_db_name, retention_policy="autogen", service_function_chain_instance="sfc_1", service_functions=service_functions))
         request = testing.DummyRequest()
         request.params["from_timestamp"] = 12341412
@@ -128,8 +131,10 @@ class TestGraphAPI(object):
             error_raised = True
         assert error_raised, "A bad request error must have been raised in case of invalid URL parameters."
 
-        service_functions = dict(nginx={"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)"},
-                                 minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)"})
+        service_functions = dict(nginx={"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)",
+                                        "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"},
+                                 minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
+                                        "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"})
         build_json_body = dict(database=test_db_name, retention_policy="autogen", service_function_chain_instance="test_sfc1_1", service_functions=service_functions)
         body = dumps(build_json_body)
         request = testing.DummyRequest()
@@ -177,13 +182,17 @@ class TestGraphAPI(object):
         )
 
         # check endpoint nodes response time property
-        for endpoint, response_time in (("minio_1_ep1", 9), ("nginx_1_ep1", 18.2), ("nginx_1_ep2", 22.2)):
+        for endpoint, response_time, request_size, response_size in (("minio_1_ep1", 9, 5760, 2033), ("nginx_1_ep1", 18.2, 2260, 9660), ("nginx_1_ep2", 22.2, 35600, 6420)):
             endpoint_node = graph_db.nodes.match("Endpoint", name=endpoint, uuid=request_id).first()
             assert endpoint_node["response_time"] == response_time, "Wrong response time property of endpoint node"
+            assert endpoint_node["request_size"] == pytest.approx(request_size, 1), "Wrong request size attribute of endpoint node"
+            assert endpoint_node["response_size"] == pytest.approx(response_size, 1), "Wrong response size attribute of endpoint node"
 
         # send a new request for a new service function chain
-        service_functions = dict(minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)"},
-                                 apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)"})
+        service_functions = dict(minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
+                                        "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"},
+                                 apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)",
+                                         "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"})
         build_json_body = dict(database=test_db_name, retention_policy="autogen", service_function_chain_instance="test_sfc2_1", service_functions=service_functions)
         body = dumps(build_json_body)
         request = testing.DummyRequest()
@@ -229,9 +238,11 @@ class TestGraphAPI(object):
         )
 
         # check endpoint nodes response time property
-        for endpoint, response_time in (("minio_2_ep1", 7), ("apache_1_ep1", 17.6)):
+        for endpoint, response_time, request_size, response_size in (("minio_2_ep1", 7, 2998, 3610), ("apache_1_ep1", 17.6, 1480, 7860)):
             endpoint_node = graph_db.nodes.match("Endpoint", name=endpoint, uuid=request_id).first()
             assert endpoint_node["response_time"] == response_time, "Wrong response time property of endpoint node"
+            assert endpoint_node["request_size"] == pytest.approx(request_size, 1), "Wrong request size attribute of endpoint node"
+            assert endpoint_node["response_size"] == pytest.approx(response_size, 1), "Wrong response size attribute of endpoint node"
 
     def test_delete(self, db_testing_data):
         """
@@ -308,8 +319,10 @@ class TestGraphAPI(object):
 
         from_timestamp, to_timestamp, test_db_name, graph_db = db_testing_data
 
-        service_functions = dict(nginx={"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)"},
-                                 minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)"})
+        service_functions = dict(nginx={"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)",
+                                        "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"},
+                                 minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
+                                        "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"})
         build_json_body = dict(database=test_db_name, retention_policy="autogen", service_function_chain_instance="test_sfc1_1", service_functions=service_functions)
         body = dumps(build_json_body)
         request = testing.DummyRequest()
@@ -355,21 +368,26 @@ class TestGraphAPI(object):
             error_raised = True
         assert error_raised, "HTTP Not Found error must be thrown for a non existing endpoint"
 
-        for dc, endpoint, forward_latencies, reverse_latencies, response_time, global_tags in (
-            ("DC6", "nginx_1_ep2", [], [], 22.2, {"location": "DC6", "sr": "sr6", "ipendpoint": "nginx_1_ep2", "host": "host2", "sfc": "test_sfc1", "sfc_i": "test_sfc1_1", "sf": "nginx", "sf_i": "nginx_1"}),
-            ("DC2", "nginx_1_ep2", [22, 30, 9], [11, 26, 15], 22.2, {"location": "DC6", "sr": "sr6", "ipendpoint": "nginx_1_ep2", "host": "host2", "sfc": "test_sfc1", "sfc_i": "test_sfc1_1", "sf": "nginx", "sf_i": "nginx_1"}),
-            ("DC3", "nginx_1_ep1", [25], [15], 18.2, {"location": "DC4", "sr": "sr4", "ipendpoint": "nginx_1_ep1", "host": "host1", "sfc": "test_sfc1", "sfc_i": "test_sfc1_1", "sf": "nginx", "sf_i": "nginx_1"})
+        for dc, endpoint, forward_latencies, reverse_latencies, response_time, request_size, response_size, rtt, global_tags in (
+            ("DC6", "nginx_1_ep2", [], [], 22.2, 35600, 6420, 22.2, {"location": "DC6", "sr": "sr6", "ipendpoint": "nginx_1_ep2", "host": "host2", "sfc": "test_sfc1", "sfc_i": "test_sfc1_1", "sf": "nginx", "sf_i": "nginx_1"}),
+            ("DC2", "nginx_1_ep2", [11, 15, 4.5], [5.5, 13, 7.5], 22.2, 35600, 6420, 78, {"location": "DC6", "sr": "sr6", "ipendpoint": "nginx_1_ep2", "host": "host2", "sfc": "test_sfc1", "sfc_i": "test_sfc1_1", "sf": "nginx", "sf_i": "nginx_1"}),
+            ("DC3", "nginx_1_ep1", [12.5], [7.5], 18.2, 2260, 9660, 38, {"location": "DC4", "sr": "sr4", "ipendpoint": "nginx_1_ep1", "host": "host1", "sfc": "test_sfc1", "sfc_i": "test_sfc1_1", "sf": "nginx", "sf_i": "nginx_1"})
         ):
             request = testing.DummyRequest()
             request.matchdict["graph_id"] = request_id
             request.params["endpoint"] = endpoint
             request.params["compute_node"] = dc
             response = GraphAPI(request).run_rtt_query()
-            assert response == {"forward_latencies": forward_latencies, "reverse_latencies": reverse_latencies, "response_time": response_time, "global_tags": global_tags}, "Incorrect RTT response"
+            assert response.pop("round_trip_time") == pytest.approx(rtt, 1), "Incorrect RTT response"
+            assert response == {"forward_latencies": forward_latencies, "reverse_latencies": reverse_latencies, "total_forward_latency": sum(forward_latencies), "total_reverse_latency": sum(reverse_latencies),
+                                "bandwidth": 104857600, "response_time": response_time, "global_tags": global_tags,
+                                "request_size": request_size, "response_size": response_size}, "Incorrect RTT response"
 
         # send a new request for a new service function chain
-        service_functions = dict(minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)"},
-                                 apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)"})
+        service_functions = dict(minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
+                                        "request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"},
+                                 apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)",
+                                         "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"})
         build_json_body = dict(database=test_db_name, retention_policy="autogen", service_function_chain_instance="test_sfc2_1", service_functions=service_functions)
         body = dumps(build_json_body)
         request = testing.DummyRequest()
@@ -382,18 +400,22 @@ class TestGraphAPI(object):
         assert graph_subresponse.get("uuid") is not None, "Request UUID must be attached to the response."
         request_id = graph_subresponse["uuid"]
 
-        for dc, endpoint, forward_latencies, reverse_latencies, response_time, global_tags in (
-            ("DC5", "apache_1_ep1", [], [], 17.6, {"location": "DC5", "sr": "sr5", "ipendpoint": "apache_1_ep1", "host": "host5", "sfc": "test_sfc2", "sfc_i": "test_sfc2_1", "sf": "apache", "sf_i": "apache_1"}),
-            ("DC5", "minio_2_ep1", [], [], 7, {"location": "DC5", "sr": "sr5", "ipendpoint": "minio_2_ep1", "host": "host4", "sfc": "test_sfc2", "sfc_i": "test_sfc2_1", "sf": "minio", "sf_i": "minio_2"}),
-            ("DC3", "apache_1_ep1", [20, 30], [26, 18], 17.6, {"location": "DC5", "sr": "sr5", "ipendpoint": "apache_1_ep1", "host": "host5", "sfc": "test_sfc2", "sfc_i": "test_sfc2_1", "sf": "apache", "sf_i": "apache_1"}),
-            ("DC2", "minio_2_ep1", [22, 30], [26, 15], 7, {"location": "DC5", "sr": "sr5", "ipendpoint": "minio_2_ep1", "host": "host4", "sfc": "test_sfc2", "sfc_i": "test_sfc2_1", "sf": "minio", "sf_i": "minio_2"})
+        for dc, endpoint, forward_latencies, reverse_latencies, response_time, request_size, response_size, rtt, global_tags in (
+            ("DC5", "apache_1_ep1", [], [], 17.6, 1480, 7860, 17.6, {"location": "DC5", "sr": "sr5", "ipendpoint": "apache_1_ep1", "host": "host5", "sfc": "test_sfc2", "sfc_i": "test_sfc2_1", "sf": "apache", "sf_i": "apache_1"}),
+            ("DC5", "minio_2_ep1", [], [], 7, 2998, 3610, 7, {"location": "DC5", "sr": "sr5", "ipendpoint": "minio_2_ep1", "host": "host4", "sfc": "test_sfc2", "sfc_i": "test_sfc2_1", "sf": "minio", "sf_i": "minio_2"}),
+            ("DC3", "apache_1_ep1", [10, 15], [13, 9], 17.6, 1480, 7860, 64, {"location": "DC5", "sr": "sr5", "ipendpoint": "apache_1_ep1", "host": "host5", "sfc": "test_sfc2", "sfc_i": "test_sfc2_1", "sf": "apache", "sf_i": "apache_1"}),
+            ("DC2", "minio_2_ep1", [11, 15], [13, 7.5], 7, 2998, 3610, 53, {"location": "DC5", "sr": "sr5", "ipendpoint": "minio_2_ep1", "host": "host4", "sfc": "test_sfc2", "sfc_i": "test_sfc2_1", "sf": "minio", "sf_i": "minio_2"})
         ):
             request = testing.DummyRequest()
             request.matchdict["graph_id"] = request_id
             request.params["endpoint"] = endpoint
             request.params["compute_node"] = dc
             response = GraphAPI(request).run_rtt_query()
-            assert response == {"forward_latencies": forward_latencies, "reverse_latencies": reverse_latencies, "response_time": response_time, "global_tags": global_tags}, "Incorrect RTT response"
+            assert response.pop("request_size") == pytest.approx(request_size, 1), "Incorrect RTT response"
+            assert response.pop("response_size") == pytest.approx(response_size, 1), "Incorrect RTT response"
+            assert response.pop("round_trip_time") == pytest.approx(rtt, 1), "Incorrect RTT response"
+            assert response == {"forward_latencies": forward_latencies, "reverse_latencies": reverse_latencies, "total_forward_latency": sum(forward_latencies), "total_reverse_latency": sum(reverse_latencies),
+                                "bandwidth": 104857600, "response_time": response_time, "global_tags": global_tags}, "Incorrect RTT response"
 
     @staticmethod
     def check_exist_relationship(relationships_tuple, graph, uuid):
diff --git a/src/service/clmcservice/graphapi/utilities.py b/src/service/clmcservice/graphapi/utilities.py
index 2841152..58d1dff 100644
--- a/src/service/clmcservice/graphapi/utilities.py
+++ b/src/service/clmcservice/graphapi/utilities.py
@@ -31,9 +31,9 @@ GRAPH_ROUND_TRIP_TIME_URL_PARAMS = ("compute_node", "endpoint")
 
 GRAPH_BUILD_URL_PARAMS = ("from", "to")
 GRAPH_BUILD_QUERY_PARAMS = {"database", "retention_policy", "service_function_chain_instance", "service_functions"}
-GRAPH_BUILD_SF_QUERY_PARAMS = {"response_time_field", "measurement_name"}
+GRAPH_BUILD_SF_QUERY_PARAMS = {"response_time_field", "request_size_field", "response_size_field", "measurement_name"}
 
-INFLUX_QUERY_TEMPLATE = 'SELECT {0} AS mean_response_time FROM "{1}"."{2}".{3} WHERE sfc_i=\'{4}\' and time>={5} and time<{6} GROUP BY ipendpoint, location, sf_i, host, sr'
+INFLUX_QUERY_TEMPLATE = 'SELECT {0} AS mean_response_time, {1} AS mean_request_size, {2} AS mean_response_size FROM "{3}"."{4}".{5} WHERE sfc_i=\'{6}\' and time>={7} and time<{8} GROUP BY ipendpoint, location, sf_i, host, sr'
 
 
 RTT_CYPHER_QUERY_TEMPLATE = """
@@ -43,9 +43,9 @@ WHERE ALL(r IN relationships(path) WHERE type(r)='linkedTo' or type(r)='hostedBy
 WITH nodes(path) as all_nodes, endpoint as endpoint
     WITH all_nodes[0..size(all_nodes)-1] as network_nodes, endpoint as endpoint
     UNWIND RANGE(0, size(network_nodes) - 2) as id
-    WITH network_nodes[id] as source, network_nodes[id+1] as target, endpoint.response_time as response_time
+    WITH network_nodes[id] as source, network_nodes[id+1] as target, endpoint.response_time as response_time, endpoint.request_size as request_size, endpoint.response_size as response_size
         MATCH (source) -[r1]-> (target), (target) -[r2]-> (source)
-        RETURN collect(r1.latency) as forward_latencies, reverse(collect(r2.latency)) as reverse_latencies, response_time
+        RETURN collect(r1.latency) as forward_latencies, reverse(collect(r2.latency)) as reverse_latencies, response_time, request_size, response_size
 """
 
 
@@ -218,11 +218,13 @@ def build_temporal_graph(request_id, from_timestamp, to_timestamp, json_queries,
     for service_function in json_queries["service_functions"]:
         query_data = json_queries["service_functions"][service_function]
 
-        field_to_query = query_data["response_time_field"]
+        response_time_field = query_data["response_time_field"]
+        request_size_field = query_data["request_size_field"]
+        response_size_field = query_data["response_size_field"]
         measurement = query_data["measurement_name"]
 
         # build up the query by setting the placeholders in the query template
-        query_to_execute = INFLUX_QUERY_TEMPLATE.format(field_to_query, db, rp, measurement, sfc_i, from_timestamp, to_timestamp)
+        query_to_execute = INFLUX_QUERY_TEMPLATE.format(response_time_field, request_size_field, response_size_field, db, rp, measurement, sfc_i, from_timestamp, to_timestamp)
 
         # create a node for the service function if it doesn't exist
         service_function_node = find_or_create_node(graph, "ServiceFunction", name=service_function)
@@ -242,6 +244,8 @@ def build_temporal_graph(request_id, from_timestamp, to_timestamp, json_queries,
 
             result_point = next(result_points)  # get the result point dictionary
             response_time = result_point["mean_response_time"]  # extract the response time of the SF from the result
+            request_size = result_point["mean_request_size"]  # extract the avg request size of the SF from the result
+            response_size = result_point["mean_response_size"]  # extract the avg response size of the SF from the result
 
             # create a ServiceFunctionInstance node from the tag value (if it is not already created)
             service_function_instance_node = find_or_create_node(graph, "ServiceFunctionInstance", name=tags["sf_i"])
@@ -251,7 +255,7 @@ def build_temporal_graph(request_id, from_timestamp, to_timestamp, json_queries,
             find_or_create_edge(graph, "utilizedBy", service_function_instance_node, service_function_chain_instance_node)
 
             # create an Endpoint node from the tag value (if it is not already created)
-            ipendpoint_node = find_or_create_node(graph, "Endpoint", name=tags["ipendpoint"], response_time=response_time, uuid=request_id, host=tags["host"], sr=tags["sr"])
+            ipendpoint_node = find_or_create_node(graph, "Endpoint", name=tags["ipendpoint"], response_time=response_time, request_size=request_size, response_size=response_size, uuid=request_id, host=tags["host"], sr=tags["sr"])
             # create an edge between the instance and the endpoint (if it is not already created)
             find_or_create_edge(graph, "realisedBy", service_function_instance_node, ipendpoint_node)
 
diff --git a/src/service/clmcservice/graphapi/views.py b/src/service/clmcservice/graphapi/views.py
index 4d1a94f..79ff877 100644
--- a/src/service/clmcservice/graphapi/views.py
+++ b/src/service/clmcservice/graphapi/views.py
@@ -148,7 +148,8 @@ class GraphAPI(object):
         # check if the endpoint is hosted by the compute node before running the RTT cypher query
         hosted_by_node = graph.relationships.match(nodes=(endpoint_node, None), r_type="hostedBy").first().end_node
         if hosted_by_node["name"] == compute_node["name"]:
-            result = {"forward_latencies": [], "reverse_latencies": [], "response_time": endpoint_node["response_time"]}
+            result = {"forward_latencies": [], "reverse_latencies": [], "response_time": endpoint_node["response_time"],
+                      "request_size": endpoint_node["request_size"], "response_size": endpoint_node["response_size"]}
         else:
             query_to_execute = RTT_CYPHER_QUERY_TEMPLATE.format(compute_node_label, endpoint_node_label, graph_id)
             log.info("Executing cypher query: {0}".format(query_to_execute))
@@ -169,4 +170,43 @@ class GraphAPI(object):
 
         result["global_tags"] = {"ipendpoint": endpoint_node["name"], "host": endpoint_node["host"], "location": hosted_by_node["name"], "sr": endpoint_node["sr"],
                                  "sfc": reference_node["sfc"], "sfc_i": reference_node["sfc_i"], "sf": sf_node["name"], "sf_i": sf_i_node["name"]}
-        return result  # we only expect one result from the query - a dictionary with the following fields: forward_latencies, reverse_latencies, response_time
+
+        # calculate the Round-Trip-Time
+        total_forward_latency = sum(result["forward_latencies"])
+        result["total_forward_latency"] = total_forward_latency
+        total_reverse_latency = sum(result["reverse_latencies"])
+        result["total_reverse_latency"] = total_reverse_latency
+        bandwidth = self.request.registry.settings["network_bandwidth"]
+        result["bandwidth"] = bandwidth
+        service_delay = result["response_time"]
+        request_size = result["request_size"]
+        response_size = result["response_size"]
+
+        round_trip_time = self.calculate_round_trip_time(total_forward_latency, total_reverse_latency, service_delay, request_size, response_size, bandwidth)
+        result["round_trip_time"] = round_trip_time
+
+        return result
+
+    @staticmethod
+    def calculate_round_trip_time(forward_latency, reverse_latency, service_delay, request_size, response_size, bandwidth, packet_size=1500, packet_header_size=50):
+        """
+        Calculates the round trip time given the list of arguments.
+
+        :param forward_latency: network latency in forward direction (s)
+        :param reverse_latency: network latency in reverse direction (s)
+        :param service_delay: media service delay (s)
+        :param request_size: request size (bytes)
+        :param response_size: response size (bytes)
+        :param bandwidth: network bandwidth (Mb/s)
+        :param packet_size: size of packet (bytes)
+        :param packet_header_size: size of the header of the packet (bytes)
+        :return: the calculated round trip time
+        """
+
+        if forward_latency > 0 and reverse_latency > 0:
+            forward_data_delay = (8/10**6) * (request_size / bandwidth) * (packet_size / (packet_size - packet_header_size))
+            reverse_data_delay = (8/10**6) * (response_size / bandwidth) * (packet_size / (packet_size - packet_header_size))
+        else:
+            forward_data_delay, reverse_data_delay = 0, 0
+
+        return forward_latency + forward_data_delay + service_delay + reverse_latency + reverse_data_delay
diff --git a/src/service/development.ini b/src/service/development.ini
index 092060e..3d14f33 100644
--- a/src/service/development.ini
+++ b/src/service/development.ini
@@ -17,6 +17,8 @@ exclog.ignore =
 # Configuration file path
 configuration_file_path = /etc/flame/clmc/service.conf
 
+network_configuration_path = /vagrant/src/service/network_config.json
+
 # PostgreSQL connection url
 sqlalchemy.url = postgresql://clmc:clmc_service@localhost:5432/whoamidb
 
diff --git a/src/service/network_config.json b/src/service/network_config.json
index 2256fc7..c8ca7cb 100644
--- a/src/service/network_config.json
+++ b/src/service/network_config.json
@@ -1,4 +1,5 @@
 {
+  "bandwidth": 104857600,
   "links": [
     {
       "source": "DC1",
diff --git a/src/service/production.ini b/src/service/production.ini
index c05a6b9..33c7ca6 100644
--- a/src/service/production.ini
+++ b/src/service/production.ini
@@ -17,6 +17,8 @@ exclog.ignore =
 # Configuration file path
 configuration_file_path = /etc/flame/clmc/service.conf
 
+network_configuration_path = /vagrant/src/service/network_config.json
+
 # PostgreSQL connection url
 sqlalchemy.url = postgresql://clmc:clmc_service@localhost:5432/whoamidb
 
-- 
GitLab