Commit cd380d0f authored by Nikolay Stanchev's avatar Nikolay Stanchev

Adds a delete API endpoint for a full media service graph identified by its SFC id

parent 89ac37e2
@@ -352,6 +352,26 @@ with **/clmc-service** so that the nginx reverse proxy server (listening on port
    }
    ```
* **DELETE** ***/graph/{service_function_chain_identifier}***
  This API method sends a request to delete the full media service graph identified by its service function chain identifier. This means deleting the SFC node, all SFC instance nodes linked to the SFC node, and all SF package, SF and temporal SF endpoint nodes which are part of the graph for this SFC.
  * Response:

    The response of this request is a JSON document containing the number of deleted nodes.
    Returns a 404 Not Found error if the SFC identifier is not associated with any SFC node in the graph.

  * Response Body Example:

    ```json
    {
      "deleted": 10
    }
    ```
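  * Example client call - a minimal sketch (not part of the original documentation), assuming the Python `requests` library and a hypothetical base URL at which the CLMC service is deployed:

    ```python
    import requests

    BASE_URL = "http://localhost/clmc-service"  # hypothetical deployment address

    # send the DELETE request for the media service graph of SFC "test_sfc"
    response = requests.delete("{0}/graph/test_sfc".format(BASE_URL))
    if response.status_code == 404:
        print("No SFC node found with the given identifier")
    else:
        print("Deleted {0} nodes".format(response.json()["deleted"]))
    ```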
* **GET** ***/graph/temporal/{graph_id}/round-trip-time?startpoint={startpoint_id}&endpoint={endpoint_id}***

  This API method sends a request to run the Cypher Round-Trip-Time query over a temporal graph associated with a request UUID (retrieved from the response of a build-graph request).
...
@@ -67,7 +67,8 @@ def main(global_config, **settings):
    # add routes of the GRAPH API
    config.add_route('graph_build', '/graph/temporal')
    config.add_route('temporal_graph_manage', '/graph/temporal/{graph_id}')
    config.add_route('full_graph_manage', '/graph/{sfc_id}')
    config.add_route('graph_algorithms_rtt', '/graph/temporal/{graph_id}/round-trip-time')
    config.add_route('graph_network_topology', '/graph/network')
    config.add_route('graph_execute_pipeline', '/graph/monitor')
...
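For reference, a minimal self-contained sketch (not part of the commit; standard Pyramid wiring is assumed, and the handler body is a placeholder rather than the actual CLMC implementation in `clmcservice/graphapi/views.py`) of how the new `full_graph_manage` route resolves a DELETE request to a view callable:

```python
from pyramid.config import Configurator
from pyramid.view import view_config


@view_config(route_name='full_graph_manage', request_method='DELETE', renderer='json')
def delete_full_graph(request):
    sfc_id = request.matchdict['sfc_id']  # extracted from the /graph/{sfc_id} URL pattern
    return {"deleted": 0}  # placeholder response body


def main():
    config = Configurator()
    config.add_route('full_graph_manage', '/graph/{sfc_id}')
    config.scan()  # picks up the @view_config-decorated view in this module
    return config.make_wsgi_app()
```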
@@ -107,7 +107,7 @@ class TestGraphAPI(object):
        Tests the graph build API endpoint - it makes 2 API calls and checks that the expected graph was created (the influx data that's being used is reported to InfluxDB in the conftest file)

        :param uuid_mock: mock object to mock the behaviour of the uuid4 function
        :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data and the graph db client object (this is a fixture from conftest)
        """

        from_timestamp, to_timestamp, graph_db = db_testing_data
@@ -123,7 +123,7 @@ class TestGraphAPI(object):
        # Create a valid build request and send it to the API endpoint
        uuid_mock.return_value = "graph_test_build_uuid1"
        responses = graph_generator(from_timestamp, to_timestamp)  # generates 2 graphs by sending requests to the build API and yielding back the response for each request
        response = next(responses)
        graph_subresponse = response.pop("graph")
@@ -231,18 +231,24 @@ class TestGraphAPI(object):
        assert endpoint_node["request_size"] == pytest.approx(request_size, 1), "Wrong request size attribute of endpoint node"
        assert endpoint_node["response_size"] == pytest.approx(response_size, 1), "Wrong response size attribute of endpoint node"
        request = testing.DummyRequest()
        request.matchdict["sfc_id"] = "test_sfc"
        response = GraphAPI(request).delete_full_graph()
        assert response == {"deleted": 17}
    @patch('clmcservice.graphapi.views.uuid4')
    def test_delete_temporal(self, uuid_mock, db_testing_data):
        """
        Tests the delete API endpoint of the Graph API - the test depends on the build test having passed successfully so that graph_1_id and graph_2_id have been set

        :param uuid_mock: the mock object used to mimic the behaviour of the uuid.uuid4 function
        :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data and the graph db client object (this is a fixture from conftest)
        """

        from_timestamp, to_timestamp, graph_db = db_testing_data

        # build test graphs
        responses = graph_generator(from_timestamp, to_timestamp)  # generates 2 graphs by sending requests to the build API and yielding back the response for each request
        uuid_mock.return_value = "graph_test_delete_uuid1"
        graph_1_test_id = uuid_mock.return_value
@@ -282,11 +288,46 @@ class TestGraphAPI(object):
        assert set([node["name"] for node in graph_db.nodes.match("Cluster")]) == set(["DC" + str(i) for i in range(1, 7)]), "Cluster nodes must not be deleted"
        assert set([node["name"] for node in graph_db.nodes.match("Switch")]) == set(["127.0.0." + str(i) for i in range(1, 7)]), "Switch nodes must not be deleted"
        assert set([node["name"] for node in graph_db.nodes.match("UserEquipment")]) == set(["ue" + str(i) for i in (2, 3, 6)]), "UE nodes must not be deleted"
        assert set([node["name"] for node in graph_db.nodes.match("ServiceFunctionEndpoint")]) == set(), "Endpoint nodes must have been deleted"
        assert set([node["name"] for node in graph_db.nodes.match("ServiceFunction")]) == {"nginx_1", "apache_1", "minio_1", "minio_2"}, "Service functions must not be deleted."
        assert set([node["name"] for node in graph_db.nodes.match("ServiceFunctionPackage")]) == {"nginx", "minio", "apache"}, "Service function packages must not be deleted"
        assert set([node["name"] for node in graph_db.nodes.match("ServiceFunctionChainInstance")]) == {"test_sfc_premium", "test_sfc_non_premium"}, "Service function chain instances must not be deleted"
        assert set([node["name"] for node in graph_db.nodes.match("ServiceFunctionChain")]) == {"test_sfc"}, "Service function chains must not be deleted"
        request = testing.DummyRequest()
        request.matchdict["sfc_id"] = "test_sfc"
        response = GraphAPI(request).delete_full_graph()
        assert response == {"deleted": 10}
    def test_delete_full(self, db_testing_data):
        """
        Tests the functionality to delete the full media service graph starting from a service function chain node down to an endpoint node.

        :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data and the graph db client object (this is a fixture from conftest)
        """

        from_timestamp, to_timestamp, graph_db = db_testing_data

        responses = graph_generator(from_timestamp, to_timestamp)  # generates 2 graphs by sending requests to the build API and yielding back the response for each request
        next(responses)
        next(responses)

        request = testing.DummyRequest()
        request.matchdict["sfc_id"] = "invalid_test_sfc"
        error_raised = False
        try:
            GraphAPI(request).delete_full_graph()
        except HTTPNotFound:
            error_raised = True
        assert error_raised, "Error must have been raised for invalid SFC identifier"

        request = testing.DummyRequest()
        request.matchdict["sfc_id"] = "test_sfc"
        response = GraphAPI(request).delete_full_graph()
        assert response == {"deleted": 17}
    @pytest.mark.parametrize("graph_id, endpoint, startpoint, error_type, error_msg", [
        ('e8cd4768-47dd-48cd-9c74-7f8926ddbad8', None, None, HTTPBadRequest, "HTTP Bad Request must be thrown in case of missing or invalid url parameters"),
        ('e8cd4768-47dd-48cd-9c74-7f8926ddbad8', None, "nginx", HTTPBadRequest, "HTTP Bad Request must be thrown in case of missing or invalid url parameters"),
@@ -322,13 +363,13 @@ class TestGraphAPI(object):
        """
        Tests the rtt API endpoint of the Graph API.

        :param db_testing_data: pair of time stamps - the from-to range of the generated influx test data and the graph db client object (this is a fixture from conftest)
        """

        from_timestamp, to_timestamp, graph_db = db_testing_data

        # create a graph to use for RTT test by using the build API endpoint
        responses = graph_generator(from_timestamp, to_timestamp)  # generates 2 graphs by sending requests to the build API and yielding back the response for each request
        response = next(responses)
        request_id = response["graph"]["uuid"]
...
@@ -24,7 +24,7 @@

from json import loads
from py2neo import Node, Relationship
from logging import getLogger


GRAPH_ROUND_TRIP_TIME_URL_PARAMS = ("startpoint", "endpoint")

@@ -60,7 +60,7 @@ RETURN latencies as forward_latencies, reverse(latencies) as reverse_latencies,
# """

log = getLogger('service_logger')
def validate_build_request_body(body):

@@ -461,3 +461,46 @@ def find_node_with_possible_types(name, possible_types, graph):
            return node, type_

    return None, None
def depth_first_search(graph, root_node):
    """
    A generator, which performs a depth-first search through the graph starting from a root node and stopping when it reaches an Endpoint node.

    :param graph: the graph db client
    :param root_node: the root node reference (e.g. a ServiceFunctionChain node)

    :return: a sequence of nodes, yielded in reverse order of their traversal (leaf nodes first, root node last)
    """

    # a separate stack used to store the nodes in post-order
    post_order = []

    # a map between the type of a node and the type of edges that are going to be searched - e.g. for nodes of type ServiceFunctionChain, look for utilizedBy and instanceOf edges
    node_edges_labels = {"ServiceFunctionChain": ("utilizedBy", "instanceOf"), "ServiceFunctionChainInstance": ("utilizedBy",), "ServiceFunction": ("realisedBy",)}

    # the stack holds the nodes that are yet to be visited
    stack = [root_node]

    log.info("Performing DFS starting from node {0}".format(root_node["name"]))

    # simple iterative depth-first search using a stack
    while len(stack) > 0:
        current_node = stack.pop()
        post_order.append(current_node)

        current_node_type = list(current_node.labels)[0]  # a node might have multiple labels in Neo4j, but in our scenario we expect exactly one label

        for edge_label in node_edges_labels.get(current_node_type, []):  # for each possible edge label of the given node type, or an empty list by default
            for relationship in graph.match({current_node, }, r_type=edge_label):  # we use a set because we do not care about direction
                # the direction of the relationship is ambiguous, so check which end of it we need to append
                if relationship.end_node == current_node:
                    stack.append(relationship.start_node)
                else:
                    stack.append(relationship.end_node)

    log.info("Performing post order iteration after DFS is finished.")

    # yield the elements in post order
    while len(post_order) > 0:
        node = post_order.pop()
        log.debug("Yielding node {0}".format(node["name"]))
        yield node
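For illustration, a minimal sketch (not part of the commit) of how this generator might be exercised on its own, assuming a reachable Neo4j instance with hypothetical credentials and a previously built graph for the "test_sfc" chain:

```python
from py2neo import Graph

from clmcservice.graphapi.utilities import depth_first_search

graph = Graph(host="localhost", password="admin")  # hypothetical connection details
sfc_node = graph.nodes.match("ServiceFunctionChain", name="test_sfc").first()
if sfc_node is not None:
    # nodes come out in reverse order of visiting, so endpoint nodes are
    # yielded before the SFC node itself - convenient for deleting leaves first
    for node in depth_first_search(graph, sfc_node):
        print(list(node.labels)[0], node["name"])
```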
@@ -24,7 +24,7 @@
from clmcservice.graphapi.utilities import validate_build_request_body, validate_monitor_request_body, RTT_CYPHER_QUERY_TEMPLATE, \
    build_network_graph, delete_network_graph, build_temporal_subgraph, delete_temporal_subgraph, validate_graph_rtt_params, find_node_with_possible_types, depth_first_search
from clmcservice.models import MonitoringProcess
from influxdb import InfluxDBClient
from py2neo import Graph
@@ -91,7 +91,7 @@ class GraphAPI(object):
        return json_response

    @view_config(route_name='temporal_graph_manage', request_method='DELETE')
    def delete_temporal_graph(self):
        """
        An API endpoint to delete a temporal graph associated with a uuid generated by the CLMC service.

@@ -110,6 +110,37 @@ class GraphAPI(object):
        number_of_deleted_nodes = delete_temporal_subgraph(graph, graph_id)
        return {"deleted": number_of_deleted_nodes}
    @view_config(route_name='full_graph_manage', request_method='DELETE')
    def delete_full_graph(self):
        """
        An API endpoint to delete a media service graph with a given SFC identifier.

        :return: A JSON document containing the number of deleted nodes.

        :raises HTTPNotFound: if there is no SFC node with the given identifier
        """

        sfc_id = self.request.matchdict['sfc_id']  # get the SFC identifier from the URL
        graph = Graph(host=self.request.registry.settings['neo4j_host'], password=self.request.registry.settings['neo4j_password'])  # connect to the neo4j graph db

        # check if this SFC node exists
        sfc_node = graph.nodes.match("ServiceFunctionChain", name=sfc_id).first()
        if sfc_node is None:
            raise HTTPNotFound("No service function chain node found with identifier {0}".format(sfc_id))

        # delete all nodes that are linked to the SFC node
        count = 0
        for node in depth_first_search(graph, sfc_node):
            graph.delete(node)
            count += 1

        # delete any reference nodes for temporal graphs that are associated with this SFC identifier
        for node in graph.nodes.match("Reference", sfc=sfc_id):
            graph.delete(node)
            count += 1

        return {"deleted": count}
    @view_config(route_name='graph_algorithms_rtt', request_method='GET')
    def run_rtt_query(self):
        """
...