diff --git a/src/service/clmcservice/graphapi/conftest.py b/src/service/clmcservice/graphapi/conftest.py index f4017e807f8211183377524b2ce795590ccdbf5c..8adc2834e4866ef818ceeddbda4c64eadb81214b 100644 --- a/src/service/clmcservice/graphapi/conftest.py +++ b/src/service/clmcservice/graphapi/conftest.py @@ -25,7 +25,7 @@ import pytest from influxdb import InfluxDBClient from py2neo import Graph -from clmcservice.graphapi.utilities import build_network_graph +from clmcservice.graphapi.utilities import build_network_graph, delete_network_graph # static network configuration data used for testing cases, latencies reported in milliseconds @@ -62,6 +62,14 @@ links = [ } ] +sdn_switches = [ + {"switchDPID": "dpid1", "inetAddress": "/127.0.0.1:1234"}, + {"switchDPID": "dpid2", "inetAddress": "/127.0.0.2:1234"}, + {"switchDPID": "dpid3", "inetAddress": "/127.0.0.3:1234"}, + {"switchDPID": "dpid4", "inetAddress": "/127.0.0.4:1234"}, + {"switchDPID": "dpid5", "inetAddress": "/127.0.0.5:1234"}, + {"switchDPID": "dpid6", "inetAddress": "/127.0.0.6:1234"}, +] switches = { "dpid1": "127.0.0.1", @@ -97,8 +105,6 @@ def db_testing_data(): :return: pair of time stamps - the from-to range of the generated influx test data, test database name and the graph db client object """ - global links, switches, clusters - test_sfc_name = "test_sfc" test_sfc_instance_1_name = "test_sfc_premium" test_sfc_instance_2_name = "test_sfc_non_premium" @@ -111,8 +117,6 @@ def db_testing_data(): # create the physical infrastructure subgraph dbs = influx.get_list_database() - switch_count, cluster_count, ues_count = build_network_graph(graph, switches, links, clusters, ues) - assert switch_count == 6 and cluster_count == 6 and ues_count == 3, "Network graph build failure" # check if exists ( if so, clear ) or create the test DB in influx if test_db_name in dbs: @@ -188,3 +192,19 @@ def db_testing_data(): influx.drop_database("CLMCMetrics") influx.drop_database(test_db_name) graph.delete_all() + + 
+@pytest.fixture +def graph_network_topology(): + """ + A utility fixture to build the network topology used throughout the test cases. + """ + + global links, switches, clusters + + graph = Graph(host="localhost", password="admin") + build_network_graph(graph, switches, links, clusters, ues) + + yield + + delete_network_graph(graph) diff --git a/src/service/clmcservice/graphapi/tests.py b/src/service/clmcservice/graphapi/tests.py index 48512ab4f8eddcd6289c3f7f225737cc10f69ed6..7830b4338cd51ebb17dcd97f1d63eb2efffa100a 100644 --- a/src/service/clmcservice/graphapi/tests.py +++ b/src/service/clmcservice/graphapi/tests.py @@ -24,12 +24,13 @@ from json import dumps, loads from signal import SIGKILL -from unittest.mock import patch, Mock, PropertyMock +from unittest.mock import patch, Mock, MagicMock, PropertyMock import pytest from pyramid import testing from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound, HTTPInternalServerError from clmcservice.graphapi.views import GraphAPI from clmcservice.models import MonitoringProcess +from clmcservice.graphapi.conftest import links, sdn_switches, ues, clusters class TestGraphAPI(object): @@ -45,7 +46,10 @@ class TestGraphAPI(object): self.registry = testing.setUp() self.registry.add_settings({"neo4j_host": "localhost", "neo4j_password": "admin", "influx_host": "localhost", "influx_port": 8086, "network_bandwidth": 104857600, - "network_ues_path": "/opt/clmc/src/service/resources/GraphAPI/network_ues.json"}) + "network_ues_path": "/opt/clmc/src/service/resources/GraphAPI/network_ues.json", + "network_clusters_path": "/opt/clmc/src/service/resources/GraphAPI/network_ues.json", + "sdn_controller_ip": "127.0.0.1", "sdn_controller_port": 8080 + }) yield @@ -103,12 +107,13 @@ class TestGraphAPI(object): assert error_raised, error_msg @patch('clmcservice.graphapi.views.uuid4') - def test_build(self, uuid_mock, db_testing_data): + def test_build(self, uuid_mock, db_testing_data, graph_network_topology): """ Tests the graph 
build API endpoint - it makes 2 API calls and checks that the expected graph was created (the influx data that's being used is reported to InfluxDB in the conftest file) :param uuid_mock: mock object to mock the behaviour of the uuid4 function :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data and the graph db client object (this is a fixture from conftest) + :param graph_network_topology: fixture to generate the network topology and clean it afterwards """ from_timestamp, to_timestamp, graph_db = db_testing_data @@ -238,12 +243,13 @@ class TestGraphAPI(object): assert response == {"deleted": 17} @patch('clmcservice.graphapi.views.uuid4') - def test_delete_temporal(self, uuid_mock, db_testing_data): + def test_delete_temporal(self, uuid_mock, db_testing_data, graph_network_topology): """ Tests the delete API endpoint of the Graph API - the test depends on the build test to have been passed successfully so that graph_1_id and graph_2_id have been set :param uuid_mock: the mock object used to mimic the behaviour of the uuid.uuid4 function :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data and the graph db client object (this is a fixture from conftest) + :param graph_network_topology: fixture to generate the network topology and clean it afterwards """ from_timestamp, to_timestamp, graph_db = db_testing_data @@ -360,11 +366,12 @@ class TestGraphAPI(object): error_raised = True assert error_raised, error_msg - def test_rtt(self, db_testing_data): + def test_rtt(self, db_testing_data, graph_network_topology): """ Tests the rtt API endpoint of the Graph API. 
:param db_testing_data: pair of time stamps - the from-to range of the generated influx test data and the graph db client object (this is a fixture from conftest) + :param graph_network_topology: fixture to generate the network topology and clean it afterwards """ from_timestamp, to_timestamp, graph_db = db_testing_data @@ -461,16 +468,93 @@ class TestGraphAPI(object): assert response == {"forward_latencies": forward_latencies, "reverse_latencies": reverse_latencies, "total_forward_latency": sum(forward_latencies), "total_reverse_latency": sum(reverse_latencies), "bandwidth": 104857600, "response_time": response_time, "local_tags": {"traffic_source": startpoint}, "global_tags": global_tags}, "Incorrect RTT response" - def test_delete_network_graph(self): + @patch('clmcservice.graphapi.views.load') + @patch('clmcservice.graphapi.views.open') + @patch('clmcservice.graphapi.views.get') + def test_build_network(self, http_get_mock, file_open_mock, json_load_mock, db_testing_data): + """ + Tests the functionality to build the network graph. 
+ + :param http_get_mock: mocks the HTTP GET function + :param file_open_mock: mocks the open file function + :param json_load_mock: mocks the JSON load function + :param db_testing_data: fixture used to get a reference to the graph DB + """ + + from_timestamp, to_timestamp, graph_db = db_testing_data # fixture, used to get reference to the graph DB + + # mock the responses from the sdn controller - 3 GET requests are executed, so we need 3 responses + mock_response1 = Mock() + mock_response1.status_code = 200 + mock_response1.json.return_value = sdn_switches + mock_response2 = Mock() + mock_response2.status_code = 200 + mock_response2.json.return_value = links + mock_response3 = Mock() + mock_response3.status_code = 200 + mock_response3.json.return_value = links + # we are doing two calls to the API, hence need to repeat the responses + http_get_mock.side_effect = [mock_response1, mock_response2, mock_response3, mock_response1, mock_response2, mock_response3] + + # mock the behaviour of reading the clusters and ues mapping from files + file_open_mock.return_value = MagicMock() # use magic mock so that special methods (dunders) are auto generated + # we are doing two calls to the API, hence need to repeat the results + json_load_mock.side_effect = [clusters, ues, clusters, ues] + + assert set([node["name"] for node in graph_db.nodes.match("Cluster")]) == set(), "Cluster nodes must not be created before the build request" + assert set([node["name"] for node in graph_db.nodes.match("Switch")]) == set(), "Switch nodes must not be created before the build request" + assert set([node["name"] for node in graph_db.nodes.match("UserEquipment")]) == set(), "UE nodes must not be created before the build request" + + # send request to build the network topology + request = testing.DummyRequest() + response = GraphAPI(request).build_network_topology() + assert response == {"new_switches_count": 6, "new_clusters_count": 6, "new_ues_count": 3} + + assert set([node["name"] for
node in graph_db.nodes.match("Cluster")]) == set(["DC" + str(i) for i in range(1, 7)]), "Cluster nodes must have been created" + assert set([node["name"] for node in graph_db.nodes.match("Switch")]) == set(["127.0.0." + str(i) for i in range(1, 7)]), "Switch nodes must have been created" + assert set([node["name"] for node in graph_db.nodes.match("UserEquipment")]) == set(["ue" + str(i) for i in (2, 3, 6)]), "UE nodes must have been created" + + # send the same request and ensure that nothing else is created + request = testing.DummyRequest() + response = GraphAPI(request).build_network_topology() + assert response == {"new_switches_count": 0, "new_clusters_count": 0, "new_ues_count": 0} + + assert set([node["name"] for node in graph_db.nodes.match("Cluster")]) == set(["DC" + str(i) for i in range(1, 7)]) + assert set([node["name"] for node in graph_db.nodes.match("Switch")]) == set(["127.0.0." + str(i) for i in range(1, 7)]) + assert set([node["name"] for node in graph_db.nodes.match("UserEquipment")]) == set(["ue" + str(i) for i in (2, 3, 6)]) + + def test_delete_network(self, graph_network_topology, db_testing_data): """ Tests the delete network graph functionality. + + :param graph_network_topology: fixture to generate the network topology and clean it afterwards + :param db_testing_data: fixture used to get a reference to the graph DB """ + from_timestamp, to_timestamp, graph_db = db_testing_data # fixture, used to get reference to the graph DB + + assert set([node["name"] for node in graph_db.nodes.match("Cluster")]) == set(["DC" + str(i) for i in range(1, 7)]), "Cluster nodes must have been created before the delete request" + assert set([node["name"] for node in graph_db.nodes.match("Switch")]) == set(["127.0.0." 
+ str(i) for i in range(1, 7)]), "Switch nodes must have been created before the delete request" + assert set([node["name"] for node in graph_db.nodes.match("UserEquipment")]) == set(["ue" + str(i) for i in (2, 3, 6)]), "UE nodes must have been created before the delete request" + + # send the first delete request request = testing.DummyRequest() response = GraphAPI(request).delete_network_topology() - assert response == {"deleted_switches_count": 6, "deleted_clusters_count": 6, "deleted_ues_count": 3} + assert set([node["name"] for node in graph_db.nodes.match("Cluster")]) == set(), "Cluster nodes must have been deleted" + assert set([node["name"] for node in graph_db.nodes.match("Switch")]) == set(), "Switch nodes must have been deleted" + assert set([node["name"] for node in graph_db.nodes.match("UserEquipment")]) == set(), "UE nodes must have been deleted" + + # send the same request and ensure that nothing else is deleted + request = testing.DummyRequest() + response = GraphAPI(request).delete_network_topology() + assert response == {"deleted_switches_count": 0, "deleted_clusters_count": 0, "deleted_ues_count": 0} + + assert set([node["name"] for node in graph_db.nodes.match("Cluster")]) == set() + assert set([node["name"] for node in graph_db.nodes.match("Switch")]) == set() + assert set([node["name"] for node in graph_db.nodes.match("UserEquipment")]) == set() + @pytest.mark.parametrize("body, expected_error, error_msg", [ (None, "Configuration must be a JSON object.", "A bad request error must have been raised in case of missing request body."), ('{"query_period": 45, "results_measurement_name": "graph"}', "Invalid JSON query document.", "A bad request error must have been raised in case of invalid request body."),