#!/usr/bin/python3
"""
// © University of Southampton IT Innovation Centre, 2018
//
// Copyright in this software belongs to University of Southampton
// IT Innovation Centre of Gamma House, Enterprise Road,
// Chilworth Science Park, Southampton, SO16 7NS, UK.
//
// This software may not be used, sold, licensed, transferred, copied
// or reproduced in whole or in part in any manner or form or in or
// on any media by any person other than in accordance with the terms
// of the Licence Agreement supplied with the software, or otherwise
// without the prior written consent of the copyright owners.
//
// This software is distributed WITHOUT ANY WARRANTY, without even the
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE, except where stated in the Licence Agreement supplied with
// the software.
//
// Created By : Nikolay Stanchev
// Created Date : 09-07-2018
// Created for Project : FLAME
"""
from json import dumps
from unittest.mock import patch

import pytest
from pyramid import testing
from pyramid.httpexceptions import HTTPBadRequest, HTTPNotFound

from clmcservice.graphapi.views import GraphAPI


class TestGraphAPI(object):
"""
    A pytest test suite for the Graph API endpoints.
"""
# used to store graph UUIDs in the build test and reuse these in the delete test
graph_1_test_id = "graph_mock_uuid1"
graph_2_test_id = "graph_mock_uuid2"
@pytest.fixture(autouse=True)
def app_config(self):
"""
        A fixture to implement setUp/tearDown functionality for all tests by initializing the configuration structure for the web service and the database connections
"""
self.registry = testing.setUp()
        self.registry.add_settings({"neo4j_host": "localhost", "neo4j_password": "admin", "influx_host": "localhost", "influx_port": 8086, "network_bandwidth": 104857600})

        yield

        testing.tearDown()
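    # Note: these settings mirror what the service reads at runtime - the Neo4j and InfluxDB connection details plus
    # "network_bandwidth" (104857600 = 100 * 1024 * 1024), which appears to be the link bandwidth value echoed back
    # in the "bandwidth" field of the RTT responses asserted in test_rtt below.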
@pytest.mark.parametrize("body, from_timestamp, to_timestamp, error_msg", [
(None, None, None, "A bad request error must have been raised in case of missing request body."),
('{}', 12341412, 1234897, "A bad request error must have been raised in case of invalid request body."),
('"service_function_chain": "sfc", "service_function_chain_instance": "sfci"}', 12341412, 1234897, "A bad request error must have been raised in case of invalid request body."),
('"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": "{invalid_json}"}', 1528386860, 1528389860, "A bad request error must have been raised in case of invalid request body."),
('"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": ["nginx", "minio"]}', 1528386860, 1528389860, "A bad request error must have been raised in case of invalid request body."),
('"service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
1528386860, 1528389860, "A bad request error must have been raised in case of missing service function chain value in the request body"),
('"service_function_chain": "sfc", "service_function_chain_instance": "sfcinstance", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
1528386860, 1528389860, "A bad request error must have been raised in case of invalid sfci ID in the request body"),
('"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
"not a timestamp", "not a timestamp", "A bad request error must have been raised in case of invalid URL parameters."),
('"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
None, "not a timestamp", "A bad request error must have been raised in case of invalid URL parameters."),
('"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
2131212, None, "A bad request error must have been raised in case of invalid URL parameters."),
('"service_function_chain": "sfc", "service_function_chain_instance": "sfc_1", "service_functions": {"nginx": {"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)", "request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size"}}}',
2131212, 2131212, "A bad request error must have been raised in case of a non-existing database."),
])
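    # For reference, a well-formed build request (exercised in test_build below) carries a JSON body of the form
    # {"service_function_chain": ..., "service_function_chain_instance": ..., "service_functions": {...}} together
    # with 'from'/'to' UNIX timestamps; each case above omits or corrupts one part of that structure.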
def test_build_error_handling(self, body, from_timestamp, to_timestamp, error_msg):
        """
        Tests the error handling of the graph build API endpoint by passing erroneous input and confirming that an HTTPBadRequest is raised.

        :param body: body of the request to test
        :param from_timestamp: the 'from' URL param
        :param to_timestamp: the 'to' URL param
        :param error_msg: the error message to pass in case of an error not being properly handled by the API endpoint (in other words, a test failure)
        """

        request = testing.DummyRequest()
        if body is not None:
            request.body = body.encode(request.charset)
if from_timestamp is not None:
request.params["from"] = from_timestamp
if to_timestamp is not None:
request.params["to"] = to_timestamp
error_raised = False
try:
GraphAPI(request).build_temporal_graph()
except HTTPBadRequest:
error_raised = True
assert error_raised, error_msg
@patch('clmcservice.graphapi.views.uuid4')
def test_build(self, uuid_mock, db_testing_data):
        """
        Tests the graph build API endpoint - it makes two API calls and checks that the expected graph was created (the influx data that is used is reported to InfluxDB in the conftest file).

        :param uuid_mock: mock object to mock the behaviour of the uuid4 function
        :param db_testing_data: the from/to timestamps of the generated influx test data and the graph db client object (a fixture from conftest)
        """

        from_timestamp, to_timestamp, graph_db = db_testing_data
ue_nodes = set([node["name"] for node in graph_db.nodes.match("UserEquipment")])
assert ue_nodes == set("ue" + str(i) for i in [2, 3, 6]), "UE nodes must have been created by the db_testing_data fixture"
dc_nodes = set([node["name"] for node in graph_db.nodes.match("Cluster")])
assert dc_nodes == set("DC" + str(i) for i in range(1, 7)), "Compute nodes must have been created by the db_testing_data fixture"
switch_nodes = set([node["name"] for node in graph_db.nodes.match("Switch")])
assert switch_nodes == set("127.0.0." + str(i) for i in range(1, 7)), "Switch nodes must have been created by the db_testing_data fixture"
service_functions = dict(nginx={"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)",
"request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"},
minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
"request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"},
apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)",
"request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"})
body = dumps(dict(service_function_chain="sfc", service_function_chain_instance="sfc_1", service_functions=service_functions))
request = testing.DummyRequest()
request.body = body.encode(request.charset)
        # the request above carries no 'from'/'to' timestamps, so the endpoint must reject it with a bad request error
        error_raised = False
        try:
            GraphAPI(request).build_temporal_graph()
        except HTTPBadRequest:
            error_raised = True
        assert error_raised, "A bad request error must have been raised in case of missing timestamp parameters."

        # Create a valid build request and send it to the API endpoint
uuid_mock.return_value = self.graph_1_test_id
service_functions = dict(nginx={"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)",
"request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"},
minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
"request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"})
build_json_body = dict(service_function_chain="test_sfc", service_function_chain_instance="test_sfc_premium", service_functions=service_functions)
build_json_body["from"] = from_timestamp
build_json_body["to"] = to_timestamp
body = dumps(build_json_body)
request = testing.DummyRequest()
request.body = body.encode(request.charset)
response = GraphAPI(request).build_temporal_graph()
assert response == {"database": "test_sfc"}, "Response must contain the database name"
assert graph_subresponse["uuid"] == self.graph_1_test_id, "Request UUID must be attached to the response."
assert graph_subresponse["time_range"]["from"] == from_timestamp * 10**9 # timestamp returned in nanoseconds
assert graph_subresponse["time_range"]["to"] == to_timestamp * 10**9 # timestamp returned in nanoseconds
request_id = graph_subresponse["uuid"]
# check that the appropriate nodes have been created
sfp_names = set([node["name"] for node in graph_db.nodes.match("ServiceFunctionPackage")])
assert sfp_names == {"nginx", "minio"}, "The graph must contain 2 service function packages - nginx and minio"
sf_names = set([node["name"] for node in graph_db.nodes.match("ServiceFunction")])
assert sf_names == {"nginx_1", "minio_1"}, "The graph must contain 2 service functions - nginx_1 and minio_1"
endpoints = set([node["name"] for node in graph_db.nodes.match("Endpoint", uuid=request_id)])
assert endpoints == {"minio_1_ep1", "nginx_1_ep1", "nginx_1_ep2"}, "The graph must contain 3 endpoints - minio_1_ep1, nginx_1_ep1, nginx_1_ep2"
sfci_names = set([node["name"] for node in graph_db.nodes.match("ServiceFunctionChainInstance")])
assert sfci_names == {"test_sfc_premium"}, "The graph must contain 1 service function chain instance - test_sfc_premium"
sfc_names = set([node["name"] for node in graph_db.nodes.match("ServiceFunctionChain")])
assert sfc_names == {"test_sfc"}, "The graph must contain 1 service function chain - test_sfc"
reference_node = graph_db.nodes.match("Reference", uuid=request_id, sfci="test_sfc_premium", sfc="test_sfc").first()
assert reference_node is not None and reference_node["from"] == from_timestamp * 10**9 and reference_node["to"] == to_timestamp * 10**9, "Reference node must have been created"
# check the appropriate edges have been created
self.check_exist_relationship(
(
("minio_1_ep1", "Endpoint", "DC4", "Cluster", "hostedBy"),
("nginx_1_ep1", "Endpoint", "DC4", "Cluster", "hostedBy"),
("nginx_1_ep2", "Endpoint", "DC6", "Cluster", "hostedBy"),
("minio_1", "ServiceFunction", "minio_1_ep1", "Endpoint", "realisedBy"),
("nginx_1", "ServiceFunction", "nginx_1_ep1", "Endpoint", "realisedBy"),
("nginx_1", "ServiceFunction", "nginx_1_ep2", "Endpoint", "realisedBy"),
("minio_1", "ServiceFunction", "minio", "ServiceFunctionPackage", "instanceOf"),
("nginx_1", "ServiceFunction", "test_sfc_premium", "ServiceFunctionChainInstance", "utilizedBy"),
("minio_1", "ServiceFunction", "test_sfc_premium", "ServiceFunctionChainInstance", "utilizedBy"),
("nginx", "ServiceFunctionPackage", "test_sfc", "ServiceFunctionChain", "utilizedBy"),
("minio", "ServiceFunctionPackage", "test_sfc", "ServiceFunctionChain", "utilizedBy"),
("test_sfc_premium", "ServiceFunctionChainInstance", "test_sfc", "ServiceFunctionChain", "instanceOf"),
# check endpoint nodes have the correct properties
for endpoint, response_time, request_size, response_size in (("minio_1_ep1", 9, 5760, 2033), ("nginx_1_ep1", 18.2, 2260, 9660), ("nginx_1_ep2", 22.2, 35600, 6420)):
endpoint_node = graph_db.nodes.match("Endpoint", name=endpoint, uuid=request_id).first()
assert endpoint_node["response_time"] == response_time, "Wrong response time property of endpoint node"
# approximation is used to avoid long float numbers retrieved from influx, the test case ensures the results are different enough so that approximation of +-1 is good enough for testing
assert endpoint_node["request_size"] == pytest.approx(request_size, 1), "Wrong request size attribute of endpoint node"
assert endpoint_node["response_size"] == pytest.approx(response_size, 1), "Wrong response size attribute of endpoint node"
# send a new request for a new service function chain instance and check the new subgraph has been created
uuid_mock.return_value = self.graph_2_test_id
service_functions = dict(minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
"request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"},
apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)",
"request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"})
build_json_body = dict(service_function_chain="test_sfc", service_function_chain_instance="test_sfc_non_premium", service_functions=service_functions)
build_json_body["from"] = from_timestamp
build_json_body["to"] = to_timestamp
body = dumps(build_json_body)
request = testing.DummyRequest()
request.body = body.encode(request.charset)
response = GraphAPI(request).build_temporal_graph()
assert response == {"database": "test_sfc"}, "Response must contain the database name"
assert graph_subresponse["uuid"] == self.graph_2_test_id, "Request UUID must be attached to the response."
assert graph_subresponse["time_range"]["from"] == from_timestamp * 10**9 # timestamp returned in nanoseconds
assert graph_subresponse["time_range"]["to"] == to_timestamp * 10**9 # timestamp returned in nanoseconds
# check the new nodes have been created
assert graph_db.nodes.match("ServiceFunctionPackage", name="apache").first() is not None, "Service function package apache must have been added to the graph"
for sf in ("apache_1", "minio_2"):
assert graph_db.nodes.match("ServiceFunction", name=sf).first() is not None, "Service function {0} must have been added to the graph".format(sf)
for ep in ("minio_2_ep1", "apache_1_ep1"):
assert graph_db.nodes.match("Endpoint", name=ep, uuid=request_id).first() is not None, "Endpoint {0} must have been added to the graph".format(ep)
assert graph_db.nodes.match("ServiceFunctionChainInstance", name="test_sfc_non_premium").first() is not None, "Service function chain instance test_sfc_non_premium must have been added to the graph"
assert graph_db.nodes.match("ServiceFunctionChain", name="test_sfc").first() is not None, "Service function chain test_sfc must have been added to the graph"
reference_node = graph_db.nodes.match("Reference", uuid=request_id, sfci="test_sfc_non_premium", sfc="test_sfc").first()
assert reference_node is not None and reference_node["from"] == from_timestamp * 10**9 and reference_node["to"] == to_timestamp * 10**9, "Reference node must have been created"
# check the appropriate edges have been created
self.check_exist_relationship(
(
("minio_2_ep1", "Endpoint", "DC5", "Cluster", "hostedBy"),
("apache_1_ep1", "Endpoint", "DC5", "Cluster", "hostedBy"),
("minio_2", "ServiceFunction", "minio_2_ep1", "Endpoint", "realisedBy"),
("apache_1", "ServiceFunction", "apache_1_ep1", "Endpoint", "realisedBy"),
("minio_2", "ServiceFunction", "minio", "ServiceFunctionPackage", "instanceOf"),
("apache_1", "ServiceFunction", "apache", "ServiceFunctionPackage", "instanceOf"),
("minio_2", "ServiceFunction", "test_sfc_non_premium", "ServiceFunctionChainInstance", "utilizedBy"),
("apache_1", "ServiceFunction", "test_sfc_non_premium", "ServiceFunctionChainInstance", "utilizedBy"),
("minio", "ServiceFunctionPackage", "test_sfc", "ServiceFunctionChain", "utilizedBy"),
("apache", "ServiceFunctionPackage", "test_sfc", "ServiceFunctionChain", "utilizedBy"),
("test_sfc_non_premium", "ServiceFunctionChainInstance", "test_sfc", "ServiceFunctionChain", "instanceOf")
# check endpoint nodes have the correct properties
for endpoint, response_time, request_size, response_size in (("minio_2_ep1", 7, 2998, 3610), ("apache_1_ep1", 17.6, 1480, 7860)):
endpoint_node = graph_db.nodes.match("Endpoint", name=endpoint, uuid=request_id).first()
assert endpoint_node["response_time"] == response_time, "Wrong response time property of endpoint node"
# approximation is used to avoid long float numbers retrieved from influx, the test case ensures the results are different enough so that approximation of +-1 is good enough for testing
assert endpoint_node["request_size"] == pytest.approx(request_size, 1), "Wrong request size attribute of endpoint node"
assert endpoint_node["response_size"] == pytest.approx(response_size, 1), "Wrong response size attribute of endpoint node"

    def test_delete(self, db_testing_data):
        """
        Tests the delete API endpoint of the Graph API - the test depends on the build test having passed successfully so that graph_1_test_id and graph_2_test_id have been set.

        :param db_testing_data: the from/to timestamps of the generated influx test data and the graph db client object (a fixture from conftest)
        """

        from_timestamp, to_timestamp, graph_db = db_testing_data
request = testing.DummyRequest()
request.matchdict["graph_id"] = "invalid_graph_id"
error_raised = False
try:
GraphAPI(request).delete_temporal_graph()
except HTTPNotFound:
error_raised = True
assert error_raised, "HTTP Not Found error must be raised in case of unrecognized subgraph ID"
        # delete the graph associated with graph_1_test_id
        request = testing.DummyRequest()
        request.matchdict["graph_id"] = self.graph_1_test_id
        response = GraphAPI(request).delete_temporal_graph()
        assert response == {"deleted": 4}, "Incorrect response when deleting temporal graph"
        # delete the graph associated with graph_2_test_id
        request = testing.DummyRequest()
        request.matchdict["graph_id"] = self.graph_2_test_id
        response = GraphAPI(request).delete_temporal_graph()
        assert response == {"deleted": 3}, "Incorrect response when deleting temporal graph"
assert len(graph_db.nodes.match("Endpoint")) == 0, "All endpoint nodes should have been deleted"
assert set([node["name"] for node in graph_db.nodes.match("Cluster")]) == set(["DC" + str(i) for i in range(1, 7)]), "Cluster nodes must not be deleted"
assert set([node["name"] for node in graph_db.nodes.match("Switch")]) == set(["127.0.0." + str(i) for i in range(1, 7)]), "Switch nodes must not be deleted"
assert set([node["name"] for node in graph_db.nodes.match("UserEquipment")]) == set(["ue" + str(i) for i in (2, 3, 6)]), "UE nodes must not be deleted"
assert set([node["name"] for node in graph_db.nodes.match("ServiceFunction")]) == {"nginx_1", "apache_1", "minio_1", "minio_2"}, "Service functions must not be deleted."
assert set([node["name"] for node in graph_db.nodes.match("ServiceFunctionPackage")]) == {"nginx", "minio", "apache"}, "Service function packages must not be deleted"
assert set([node["name"] for node in graph_db.nodes.match("ServiceFunctionChainInstance")]) == {"test_sfc_premium", "test_sfc_non_premium"}, "Service function chain instances must not be deleted"
assert set([node["name"] for node in graph_db.nodes.match("ServiceFunctionChain")]) == {"test_sfc"}, "Service function chains must not be deleted"
@pytest.mark.parametrize("graph_id, endpoint, startpoint, error_type, error_msg", [
('e8cd4768-47dd-48cd-9c74-7f8926ddbad8', None, None, HTTPBadRequest, "HTTP Bad Request must be thrown in case of missing or invalid url parameters"),
('e8cd4768-47dd-48cd-9c74-7f8926ddbad8', None, "nginx", HTTPBadRequest, "HTTP Bad Request must be thrown in case of missing or invalid url parameters"),
('e8cd4768-47dd-48cd-9c74-7f8926ddbad8', "nginx_1_ep1", None, HTTPBadRequest, "HTTP Bad Request must be thrown in case of missing or invalid url parameters"),
('random-uuid', "nginx_1_ep1", "nginx", HTTPNotFound, "HTTP Not Found error must be thrown for an endpoint node with incorrect request ID"),
('random-uuid', "minio_1_ep1", "minio", HTTPNotFound, "HTTP Not Found error must be thrown for an endpoint node with incorrect request ID"),
])
def test_rtt_error_handling(self, graph_id, endpoint, startpoint, error_type, error_msg):
        """
        Tests the error handling of the graph round trip time API endpoint - achieved by sending erroneous input in the request and verifying the appropriate error type has been returned.

        :param graph_id: the UUID of the subgraph
        :param endpoint: endpoint ID
        :param startpoint: the start point of the round trip
        :param error_type: error type to expect as a response
        :param error_msg: error message in case of a test failure
"""
request = testing.DummyRequest()
request.matchdict["graph_id"] = graph_id
if endpoint is not None:
request.params["endpoint"] = endpoint
if startpoint is not None:
request.params["startpoint"] = startpoint
error_raised = False
try:
GraphAPI(request).run_rtt_query()
except error_type:
error_raised = True
assert error_raised, error_msg

    def test_rtt(self, db_testing_data):
        """
        Tests the rtt API endpoint of the Graph API.

        :param db_testing_data: the from/to timestamps of the generated influx test data and the graph db client object (a fixture from conftest)
        """

        from_timestamp, to_timestamp, graph_db = db_testing_data
# create a graph to use for RTT test by using the build API endpoint
service_functions = dict(nginx={"measurement_name": "nginx", "response_time_field": "mean(avg_processing_time)",
"request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"},
minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
"request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"})
build_json_body = dict(service_function_chain="test_sfc", service_function_chain_instance="test_sfc_premium", service_functions=service_functions)
build_json_body["from"] = from_timestamp
build_json_body["to"] = to_timestamp
body = dumps(build_json_body)
request = testing.DummyRequest()
request.body = body.encode(request.charset)
response = GraphAPI(request).build_temporal_graph()
request_id = response["graph"]["uuid"]
# test some more error case handling of the RTT API endpoint
request = testing.DummyRequest()
request.matchdict["graph_id"] = request_id
request.params["endpoint"] = "nginx_1_ep1"
request.params["compute"] = "DC1"
error_raised = False
try:
GraphAPI(request).run_rtt_query()
except HTTPBadRequest:
error_raised = True
assert error_raised, "HTTP Bad Request must be thrown in case of missing or invalid url parameters"
request = testing.DummyRequest()
request.matchdict["graph_id"] = request_id
request.params["endpoint"] = "nginx_1_ep1"
error_raised = False
try:
GraphAPI(request).run_rtt_query()
except HTTPNotFound:
error_raised = True
assert error_raised, "HTTP Not Found error must be thrown for non existing compute node"
request = testing.DummyRequest()
request.matchdict["graph_id"] = request_id
request.params["endpoint"] = "apache_1_ep1"
error_raised = False
try:
GraphAPI(request).run_rtt_query()
except HTTPNotFound:
error_raised = True
assert error_raised, "HTTP Not Found error must be thrown for a non existing endpoint"
# go through the set of input/output (expected) parameters and assert actual results match with expected ones
for startpoint, endpoint, forward_latencies, reverse_latencies, response_time, request_size, response_size, rtt, global_tags in (
("DC6", "nginx_1_ep2", [], [], 22.2, 35600, 6420, 22.2, {"flame_location": "DC6", "flame_sfe": "nginx_1_ep2", "flame_server": "DC6", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_premium", "flame_sfp": "nginx", "flame_sf": "nginx_1"}),
("127.0.0.6", "nginx_1_ep2", [0], [0], 22.2, 35600, 6420, 22.2, {"flame_location": "DC6", "flame_sfe": "nginx_1_ep2", "flame_server": "DC6", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_premium", "flame_sfp": "nginx", "flame_sf": "nginx_1"}),
("ue6", "nginx_1_ep2", [0, 0], [0, 0], 22.2, 35600, 6420, 22.2, {"flame_location": "DC6", "flame_sfe": "nginx_1_ep2", "flame_server": "DC6", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_premium", "flame_sfp": "nginx", "flame_sf": "nginx_1"}),
("DC2", "nginx_1_ep2", [0, 7.5, 15, 4.5, 0], [0, 4.5, 15, 7.5, 0], 22.2, 35600, 6420, 78, {"flame_location": "DC6", "flame_sfe": "nginx_1_ep2", "flame_server": "DC6", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_premium", "flame_sfp": "nginx", "flame_sf": "nginx_1"}),
("127.0.0.2", "nginx_1_ep2", [7.5, 15, 4.5, 0], [0, 4.5, 15, 7.5], 22.2, 35600, 6420, 78, {"flame_location": "DC6", "flame_sfe": "nginx_1_ep2", "flame_server": "DC6", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_premium", "flame_sfp": "nginx", "flame_sf": "nginx_1"}),
("DC3", "nginx_1_ep1", [0, 12.5, 0], [0, 12.5, 0], 18.2, 2260, 9660, 38, {"flame_location": "DC4", "flame_sfe": "nginx_1_ep1", "flame_server": "DC4", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_premium", "flame_sfp": "nginx", "flame_sf": "nginx_1"}),
("127.0.0.3", "nginx_1_ep1", [12.5, 0], [0, 12.5], 18.2, 2260, 9660, 38, {"flame_location": "DC4", "flame_sfe": "nginx_1_ep1", "flame_server": "DC4", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_premium", "flame_sfp": "nginx", "flame_sf": "nginx_1"}),
("ue3", "nginx_1_ep1", [0, 12.5, 0], [0, 12.5, 0], 18.2, 2260, 9660, 38, {"flame_location": "DC4", "flame_sfe": "nginx_1_ep1", "flame_server": "DC4", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_premium", "flame_sfp": "nginx", "flame_sf": "nginx_1"})
):
request = testing.DummyRequest()
request.matchdict["graph_id"] = request_id
request.params["endpoint"] = endpoint
request.params["startpoint"] = startpoint
            # approximation is used to avoid long float numbers retrieved from influx, the test case ensures the results are different enough so that approximation of +-1 is good enough for testing
            response = GraphAPI(request).run_rtt_query()
            assert response.pop("round_trip_time") == pytest.approx(rtt, 1), "Incorrect RTT response"
assert response == {"forward_latencies": forward_latencies, "reverse_latencies": reverse_latencies, "total_forward_latency": sum(forward_latencies), "total_reverse_latency": sum(reverse_latencies),
"bandwidth": 104857600, "response_time": response_time, "local_tags": {"traffic_source": startpoint}, "global_tags": global_tags,
"request_size": request_size, "response_size": response_size}, "Incorrect RTT response"
# send a new request for a new service function chain to create a second subgraph to test
service_functions = dict(minio={"measurement_name": "minio_http", "response_time_field": "mean(total_processing_time)/mean(total_requests_count)",
"request_size_field": "mean(total_requests_size)/mean(total_requests_count)", "response_size_field": "mean(total_response_size)/mean(total_requests_count)"},
apache={"measurement_name": "apache", "response_time_field": "mean(avg_processing_time)",
"request_size_field": "mean(avg_request_size)", "response_size_field": "mean(avg_response_size)"})
build_json_body = dict(service_function_chain="test_sfc", service_function_chain_instance="test_sfc_non_premium", service_functions=service_functions)
build_json_body["from"] = from_timestamp
build_json_body["to"] = to_timestamp
body = dumps(build_json_body)
request = testing.DummyRequest()
request.body = body.encode(request.charset)
response = GraphAPI(request).build_temporal_graph()
request_id = response["graph"]["uuid"]
# go through the set of input/output (expected) parameters and assert actual results match with expected ones
for startpoint, endpoint, forward_latencies, reverse_latencies, response_time, request_size, response_size, rtt, global_tags in (
("DC5", "apache_1_ep1", [], [], 17.6, 1480, 7860, 17.6, {"flame_location": "DC5", "flame_sfe": "apache_1_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "apache", "flame_sf": "apache_1"}),
("127.0.0.5", "apache_1_ep1", [0], [0], 17.6, 1480, 7860, 17.6, {"flame_location": "DC5", "flame_sfe": "apache_1_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "apache", "flame_sf": "apache_1"}),
("DC5", "minio_2_ep1", [], [], 7, 2998, 3610, 7, {"flame_location": "DC5", "flame_sfe": "minio_2_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "minio", "flame_sf": "minio_2"}),
("127.0.0.5", "minio_2_ep1", [0], [0], 7, 2998, 3610, 7, {"flame_location": "DC5", "flame_sfe": "minio_2_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "minio", "flame_sf": "minio_2"}),
("DC3", "apache_1_ep1", [0, 9, 15, 0], [0, 15, 9, 0], 17.6, 1480, 7860, 64, {"flame_location": "DC5", "flame_sfe": "apache_1_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "apache", "flame_sf": "apache_1"}),
("127.0.0.3", "apache_1_ep1", [9, 15, 0], [0, 15, 9], 17.6, 1480, 7860, 64, {"flame_location": "DC5", "flame_sfe": "apache_1_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "apache", "flame_sf": "apache_1"}),
("ue3", "apache_1_ep1", [0, 9, 15, 0], [0, 15, 9, 0], 17.6, 1480, 7860, 64, {"flame_location": "DC5", "flame_sfe": "apache_1_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "apache", "flame_sf": "apache_1"}),
("DC2", "minio_2_ep1", [0, 7.5, 15, 0], [0, 15, 7.5, 0], 7, 2998, 3610, 53, {"flame_location": "DC5", "flame_sfe": "minio_2_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "minio", "flame_sf": "minio_2"}),
("127.0.0.2", "minio_2_ep1", [7.5, 15, 0], [0, 15, 7.5], 7, 2998, 3610, 53, {"flame_location": "DC5", "flame_sfe": "minio_2_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "minio", "flame_sf": "minio_2"}),
("ue2", "minio_2_ep1", [0, 7.5, 15, 0], [0, 15, 7.5, 0], 7, 2998, 3610, 53, {"flame_location": "DC5", "flame_sfe": "minio_2_ep1", "flame_server": "DC5", "flame_sfc": "test_sfc", "flame_sfci": "test_sfc_non_premium", "flame_sfp": "minio", "flame_sf": "minio_2"})
):
request = testing.DummyRequest()
request.matchdict["graph_id"] = request_id
request.params["endpoint"] = endpoint
request.params["startpoint"] = startpoint
            # approximation is used to avoid long float numbers retrieved from influx, the test case ensures the results are different enough so that approximation of +-1 is good enough for testing
            response = GraphAPI(request).run_rtt_query()
            assert response.pop("request_size") == pytest.approx(request_size, 1), "Incorrect RTT response"
assert response.pop("response_size") == pytest.approx(response_size, 1), "Incorrect RTT response"
assert response.pop("round_trip_time") == pytest.approx(rtt, 1), "Incorrect RTT response"
assert response == {"forward_latencies": forward_latencies, "reverse_latencies": reverse_latencies, "total_forward_latency": sum(forward_latencies), "total_reverse_latency": sum(reverse_latencies),
"bandwidth": 104857600, "response_time": response_time, "local_tags": {"traffic_source": startpoint}, "global_tags": global_tags}, "Incorrect RTT response"
def test_delete_network_graph(self):
"""
Tests the delete network graph functionality.
"""
request = testing.DummyRequest()
response = GraphAPI(request).delete_network_topology()
assert response == {"deleted_switches_count": 6, "deleted_clusters_count": 6, "deleted_ues_count": 3}
@staticmethod
def check_exist_relationship(relationships_tuple, graph, uuid):
        """
        Iterates through a tuple of relationships and checks that each of them exists - a utility method to be reused for testing.
:param relationships_tuple: the tuple to iterate
:param graph: the graph object
:param uuid: the uuid of the request
"""
for relationship in relationships_tuple:
from_node_name, from_node_type, to_node_name, to_node_type, relationship_type = relationship
if from_node_type == "Endpoint":
from_node = graph.nodes.match(from_node_type, name=from_node_name, uuid=uuid).first()
else:
from_node = graph.nodes.match(from_node_type, name=from_node_name).first()
assert from_node is not None # IMPORTANT, assert the from_node exists, otherwise the py2neo RelationshipMatcher object assumes you are looking for any node (instead of raising an error)
if to_node_type == "Endpoint":
to_node = graph.nodes.match(to_node_type, name=to_node_name, uuid=uuid).first()
else:
to_node = graph.nodes.match(to_node_type, name=to_node_name).first()
            assert to_node is not None  # IMPORTANT, assert the to_node exists, otherwise the py2neo RelationshipMatcher object assumes you are looking for any node (instead of raising an error)
assert graph.relationships.match(nodes=(from_node, to_node), r_type=relationship_type).first() is not None, "Graph is missing a required relationship"