Commit e6b42371 authored by Nikolay Stanchev

Implements DELETE for alerts API

parent f3a9771b
@@ -61,8 +61,8 @@ class TestAlertsConfigurationAPI(object):
A fixture to implement setUp/tearDown functionality for all tests by initializing the configuration structure for the web service
"""
self.registry = testing.setUp()
self.registry.add_settings({"kapacitor_host": "localhost", "kapacitor_port": 9092, "sfemc_fqdn": "sfemc.localhost", "sfemc_port": 8081})
self.config = testing.setUp()
self.config.add_settings({"kapacitor_host": "localhost", "kapacitor_port": 9092, "sfemc_fqdn": "sfemc.localhost", "sfemc_port": 8081})
yield
@@ -152,18 +152,23 @@ class TestAlertsConfigurationAPI(object):
Tests the POST API endpoint of the alerts configuration API responsible for receiving alerts specifications.
Test steps are:
* Traverse all valid TOSCA Alerts Specifications in the
src/service/clmcservice/resources/tosca/test-data/clmc-validator/valid and src/service/clmcservice/resources/tosca/test-data/tosca-parser/valid
* Sending a valid TOSCA Alert Specification to the view responsible for configuring Kapacitor
* Traverse all valid TOSCA Alerts Specifications and TOSCA Resource Specifications in the
src/service/clmcservice/resources/tosca/test-data/clmc-validator/valid and src/service/clmcservice/resources/tosca/test-data/tosca-parser/valid folders
* Send a valid TOSCA Alert Specification to the view responsible for configuring Kapacitor and creating alerts
* Check that Kapacitor alerts, topics and handlers are created with the correct identifiers and arguments
* Check that the API returns duplication errors if the same alerts specification is sent again
:param app_config: fixture for setUp/tearDown of the web service registry
"""
kapacitor_host = self.config.registry.settings["kapacitor_host"]
kapacitor_port = self.config.registry.settings["kapacitor_port"]
test_folder = "clmc-validator"
alerts_test_data_path = join(dirname(ROOT_DIR), *["resources", "tosca", "test-data", test_folder, "valid"])
resources_test_data_path = join(dirname(ROOT_DIR), *["resources", "tosca", "test-data", "resource-spec"])
# traverse through all files in the clmc-validator/valid folder (expected to be valid TOSCA alert specifications)
for alerts_test_file in listdir(alerts_test_data_path):
alert_spec_abs_path = join(alerts_test_data_path, alerts_test_file)
@@ -172,6 +177,8 @@ class TestAlertsConfigurationAPI(object):
print("Testing file {0} in folder {1}".format(alerts_test_file, test_folder))
# the resource specification consistent with this alert spec will have the same name, with "alerts"
# replaced by "resources_valid" for a valid spec or "resources_invalid" for an invalid spec
valid_resources_test_file = alerts_test_file.replace("alerts", "resources_valid")
invalid_resources_test_file = alerts_test_file.replace("alerts", "resources_invalid")
valid_resource_spec_abs_path = join(resources_test_data_path, valid_resources_test_file)
@@ -191,12 +198,13 @@ class TestAlertsConfigurationAPI(object):
except HTTPBadRequest:
pass # we expect this to happen
# reset the read pointer of the alert specification file since it was already read once
alert_spec.seek(0)
# then send a consistent resource spec
with open(valid_resource_spec_abs_path) as valid_resource_spec:
request = testing.DummyRequest()
sfc, sfc_instance, alert_ids, topic_handlers = extract_alert_spec_data(alert_spec)
alert_spec.seek(0)
sfc, sfc_instance, alert_ids, topic_handlers = extract_alert_spec_data(alert_spec, self.config.registry.settings["sfemc_fqdn"], self.config.registry.settings["sfemc_port"])
alert_spec.seek(0) # reset the read pointer to the beginning again (the extraction in the previous step had to read the full file)
request.POST['alert-spec'] = FieldStorageMock(alerts_test_file, alert_spec) # a simple mock class is used to mimic the FieldStorage class
request.POST['resource-spec'] = FieldStorageMock(valid_resources_test_file, valid_resource_spec)
clmc_service_response = AlertsConfigurationAPI(request).post_alerts_specification()
@@ -208,7 +216,7 @@ class TestAlertsConfigurationAPI(object):
# traverse through all alert IDs and check that they are created within Kapacitor
for alert_id, alert_type in alert_ids:
kapacitor_response = get("http://localhost:9092/kapacitor/v1/tasks/{0}".format(alert_id))
kapacitor_response = get("http://{0}:{1}/kapacitor/v1/tasks/{2}".format(kapacitor_host, kapacitor_port, alert_id))
assert kapacitor_response.status_code == 200, "Alert with ID {0} was not created - test file {1}.".format(alert_id, alerts_test_file)
kapacitor_response_json = kapacitor_response.json()
assert "link" in kapacitor_response_json, "Incorrect response from kapacitor for alert with ID {0} - test file {1}".format(alert_id, alerts_test_file)
@@ -217,17 +225,15 @@ class TestAlertsConfigurationAPI(object):
assert kapacitor_response_json["type"] == alert_type, "Alert with ID {0} was created with the wrong type - test file {1}".format(alert_id, alerts_test_file)
# check that all topic IDs were registered within Kapacitor
topic_ids = list(topic_handlers.keys())
kapacitor_response = get("http://localhost:9092/kapacitor/v1/alerts/topics")
assert kapacitor_response.status_code == 200, "Kapacitor couldn't return the list of created topics - test file {0}".format(alerts_test_file)
kapacitor_response_json = kapacitor_response.json()
kapacitor_defined_topics = [topic["id"] for topic in kapacitor_response_json["topics"]]
assert set(topic_ids).issubset(kapacitor_defined_topics), "Not all topic IDs were created within kapacitor - test file {0}".format(alerts_test_file)
# check that all handler IDs were created and each of them is subscribed to the correct topic ID
for topic_id in topic_handlers:
for topic_id in topic_handlers.keys():
kapacitor_response = get("http://{0}:{1}/kapacitor/v1/alerts/topics/{2}".format(kapacitor_host, kapacitor_port, topic_id))
assert kapacitor_response.status_code == 200, "Topic with ID {0} was not created - test file {1}".format(topic_id, alerts_test_file)
kapacitor_response_json = kapacitor_response.json()
assert kapacitor_response_json["id"] == topic_id, "Topic {0} was created with incorrect ID - test file {1}".format(topic_id, alerts_test_file)
for handler_id, handler_url in topic_handlers[topic_id]:
kapacitor_response = get("http://localhost:9092/kapacitor/v1/alerts/topics/{0}/handlers/{1}".format(topic_id, handler_id))
kapacitor_response = get("http://{0}:{1}/kapacitor/v1/alerts/topics/{2}/handlers/{3}".format(kapacitor_host, kapacitor_port, topic_id, handler_id))
assert kapacitor_response.status_code == 200, "Handler with ID {0} for topic with ID {1} doesn't exist - test file {2}".format(handler_id, topic_id, alerts_test_file)
kapacitor_response_json = kapacitor_response.json()
assert kapacitor_response_json["id"] == handler_id, "Incorrect ID of handler {0} in the Kapacitor response - test file {1}".format(handler_id, alerts_test_file)
@@ -247,7 +253,94 @@ class TestAlertsConfigurationAPI(object):
handlers_count = sum([len(topic_handlers[topic]) for topic in topic_handlers])
assert len(clmc_service_response["triggers_action_errors"]) == handlers_count, "Expected errors were not returned for handlers specification"
clear_kapacitor_alerts(alert_ids, topic_handlers)
clear_kapacitor_alerts(alert_ids, topic_handlers, kapacitor_host, kapacitor_port)
def test_alerts_config_api_delete(self, app_config):
"""
Tests the DELETE API endpoint of the alerts configuration API responsible for deleting alerts specifications.
Test steps are:
* Traverse all valid TOSCA Alerts Specifications and TOSCA Resource Specifications in the
src/service/clmcservice/resources/tosca/test-data/clmc-validator/valid and src/service/clmcservice/resources/tosca/test-data/tosca-parser/valid folders
* Send a valid TOSCA Alert Specification to the view responsible for configuring Kapacitor and creating alerts
* Send a valid TOSCA Alert Specification to the view responsible for deleting the created alerts if they exist
* Check that all Kapacitor resources (task, topic, handler) have been deleted
:param app_config: fixture for setUp/tearDown of the web service registry
"""
kapacitor_host = self.config.registry.settings["kapacitor_host"]
kapacitor_port = self.config.registry.settings["kapacitor_port"]
test_folder = "clmc-validator"
alerts_test_data_path = join(dirname(ROOT_DIR), *["resources", "tosca", "test-data", test_folder, "valid"])
resources_test_data_path = join(dirname(ROOT_DIR), *["resources", "tosca", "test-data", "resource-spec"])
# traverse through all files in the clmc-validator/valid folder (expected to be valid TOSCA alert specifications)
for alerts_test_file in listdir(alerts_test_data_path):
alert_spec_abs_path = join(alerts_test_data_path, alerts_test_file)
if not isfile(alert_spec_abs_path):
continue # skip directories
print("Testing file {0} in folder {1}".format(alerts_test_file, test_folder))
# the resource specification consistent with this alert spec will have the same name, with "alerts"
# replaced by "resources_valid" for a valid spec or "resources_invalid" for an invalid spec
resources_test_file = alerts_test_file.replace("alerts", "resources_valid")
resource_spec_abs_path = join(resources_test_data_path, resources_test_file)
print("Test uses resource spec. file {0}".format(resources_test_file))
with open(alert_spec_abs_path) as alert_spec:
alerts, handlers = extract_tosca_spec_data(alert_spec)
alert_spec.seek(0) # reset the read pointer to the beginning again (the extraction in the previous step had to read the full file)
_, _, alert_ids, topic_handlers = extract_alert_spec_data(alert_spec, self.config.registry.settings["sfemc_fqdn"], self.config.registry.settings["sfemc_port"])
alert_spec.seek(0) # reset the read pointer to the beginning again (the extraction in the previous step had to read the full file)
# send valid alert and resource spec to create the alerts to be deleted afterwards
with open(resource_spec_abs_path) as resource_spec:
request = testing.DummyRequest()
request.POST['alert-spec'] = FieldStorageMock(alerts_test_file, alert_spec) # a simple mock class is used to mimic the FieldStorage class
request.POST['resource-spec'] = FieldStorageMock(resources_test_file, resource_spec)
AlertsConfigurationAPI(request).post_alerts_specification()
# now send the alert spec for deletion and check that everything is deleted in Kapacitor
alert_spec.seek(0) # reset the read pointer since the file has already been read once
request = testing.DummyRequest()
request.params['alert-spec'] = FieldStorageMock(alerts_test_file, alert_spec)
response = AlertsConfigurationAPI(request).delete_alerts_specification()
# assert the response is as expected, containing the deleted alerts and handlers
assert response.keys() == {"deleted_alerts", "deleted_handlers"}, "Incorrect response format"
assert sorted(response["deleted_alerts"], key=lambda x: x["trigger"]) == alerts, "Incorrect result for deleted alerts"
assert sorted(response["deleted_handlers"], key=lambda x: x["handler"]) == handlers, "Incorrect result for deleted handlers"
# ensure that these resources (tasks, topics, handlers) no longer exist in Kapacitor
# traverse through all alert IDs and check that they are deleted from Kapacitor
for alert_id, alert_type in alert_ids:
kapacitor_response = get("http://{0}:{1}/kapacitor/v1/tasks/{2}".format(kapacitor_host, kapacitor_port, alert_id))
assert kapacitor_response.status_code == 404, "Alert with ID {0} was not deleted - test file {1}.".format(alert_id, alerts_test_file)
# check that all topic IDs were deleted from Kapacitor
for topic_id in topic_handlers.keys():
kapacitor_response = get("http://{0}:{1}/kapacitor/v1/alerts/topics/{2}".format(kapacitor_host, kapacitor_port, topic_id))
assert kapacitor_response.status_code == 404, "Topic with ID {0} was not deleted - test file {1}".format(topic_id, alerts_test_file)
# check that all handler IDs were deleted from Kapacitor
for topic_id in topic_handlers:
for handler_id, handler_url in topic_handlers[topic_id]:
kapacitor_response = get("http://{0}:{1}/kapacitor/v1/alerts/topics/{2}/handlers/{3}".format(kapacitor_host, kapacitor_port, topic_id, handler_id))
assert kapacitor_response.status_code == 404, "Handler with ID {0} for topic with ID {1} was not deleted - test file {2}".format(handler_id, topic_id, alerts_test_file)
# now send a second delete request to ensure that nothing else is deleted
with open(alert_spec_abs_path) as alert_spec:
request = testing.DummyRequest()
request.params['alert-spec'] = FieldStorageMock(alerts_test_file, alert_spec)
response = AlertsConfigurationAPI(request).delete_alerts_specification()
assert response == {"deleted_alerts": [], "deleted_handlers": []}, "Incorrect response after a second delete"
class FieldStorageMock(object):
@@ -264,11 +357,14 @@ class FieldStorageMock(object):
self.file = file
def extract_alert_spec_data(alert_spec):
def extract_alert_spec_data(alert_spec, sfemc_fqdn, sfemc_port):
"""
A utility function to extract the expected alert, handler and topic identifiers from a given alert specification.
A utility function to extract the expected alert, handler and topic identifiers (Kapacitor resources) from a given alert specification.
:param alert_spec: the alert specification file (file object)
:param sfemc_fqdn: FQDN of SFEMC
:param sfemc_port: port number of SFEMC
:return: a tuple containing sfc_id and sfc_instance_id along with a list and a dictionary of generated IDs (alert IDs (list), topic IDs linked to handler IDs (dict))
"""
@@ -301,7 +397,7 @@ def extract_alert_spec_data(alert_spec):
for handler_url in trigger.trigger_tpl["action"]["implementation"]:
if handler_url == "flame_sfemc":
handler_url = "http://sfemc.localhost:8081/sfemc/event/{0}/{1}/{2}".format(sfc, policy_id, "trigger_id_{0}".format(version))
handler_url = "http://{0}:{1}/sfemc/event/{2}/{3}/{4}".format(sfemc_fqdn, sfemc_port, sfc, policy_id, "trigger_id_{0}".format(version))
handler_id = "{0}\n{1}\n{2}\n{3}\n{4}".format(sfc, sfc_instance, policy_id, trigger_id, handler_url)
handler_id = AlertsConfigurationAPI.get_hash(handler_id)
@@ -310,6 +406,36 @@ def extract_alert_spec_data(alert_spec):
return sfc, sfc_instance, alert_ids, topic_handlers
def extract_tosca_spec_data(alert_spec):
"""
A utility function to extract the expected triggers and handlers (TOSCA resources) from a given alert specification.
:param alert_spec: the alert specification file (file object)
:return: a tuple containing a list of alert definitions (policy and trigger IDs) and a list of handler definitions (policy, trigger, handler)
"""
yaml_alert_spec = load(alert_spec)
adjust_tosca_definitions_import(yaml_alert_spec)
tosca_tpl = ToscaTemplate(yaml_dict_tpl=yaml_alert_spec)
alerts = [] # saves all alert definitions in a list
handlers = [] # saves all handler definitions in a list
for policy in tosca_tpl.policies:
policy_id = policy.name
for trigger in policy.triggers:
trigger_id = trigger.name
alerts.append({"policy": policy_id, "trigger": trigger_id})
for handler_url in trigger.trigger_tpl["action"]["implementation"]:
handlers.append({"policy": policy_id, "trigger": trigger_id, "handler": handler_url})
return sorted(alerts, key=lambda x: x["trigger"]), sorted(handlers, key=lambda x: x["handler"])
def get_alert_type(event_type, alert_period):
"""
Retrieve the alert type (stream or batch) based on the event type and alert period.
@@ -330,22 +456,24 @@ def get_alert_type(event_type, alert_period):
return events[event_type]
def clear_kapacitor_alerts(alert_ids, topic_handlers):
def clear_kapacitor_alerts(alert_ids, topic_handlers, kapacitor_host, kapacitor_port):
"""
A utility function to clean up Kapacitor from the configured alerts, topics and handlers.
:param alert_ids: the list of alert IDs to delete
:param topic_handlers: the dictionary of topic and handlers to delete
:param kapacitor_host: Kapacitor hostname
:param kapacitor_port: Kapacitor port number
"""
for alert_id, _ in alert_ids:
kapacitor_response = delete("http://localhost:9092/kapacitor/v1/tasks/{0}".format(alert_id)) # delete alert
kapacitor_response = delete("http://{0}:{1}/kapacitor/v1/tasks/{2}".format(kapacitor_host, kapacitor_port, alert_id)) # delete alert
assert kapacitor_response.status_code == 204
for topic_id in topic_handlers:
for handler_id, handler_url in topic_handlers[topic_id]:
kapacitor_response = delete("http://localhost:9092/kapacitor/v1/alerts/topics/{0}/handlers/{1}".format(topic_id, handler_id)) # delete handler
kapacitor_response = delete("http://{0}:{1}/kapacitor/v1/alerts/topics/{2}/handlers/{3}".format(kapacitor_host, kapacitor_port, topic_id, handler_id)) # delete handler
assert kapacitor_response.status_code == 204
kapacitor_response = delete("http://localhost:9092/kapacitor/v1/alerts/topics/{0}".format(topic_id)) # delete topic
kapacitor_response = delete("http://{0}:{1}/kapacitor/v1/alerts/topics/{2}".format(kapacitor_host, kapacitor_port, topic_id)) # delete topic
assert kapacitor_response.status_code == 204
@@ -31,7 +31,7 @@ from pyramid.httpexceptions import HTTPBadRequest
from pyramid.view import view_defaults, view_config
from yaml import load, YAMLError
from toscaparser.tosca_template import ToscaTemplate
from requests import post
from requests import post, get, delete
# CLMC-service imports
from clmcservice.alertsapi.utilities import adjust_tosca_definitions_import, TICKScriptTemplateFiller, fill_http_post_handler_vars, get_resource_spec_policy_triggers, get_alert_spec_policy_triggers
@@ -88,11 +88,108 @@ class AlertsConfigurationAPI(object):
"topic_handlers_api_endpoint": "/kapacitor/v1/alerts/topics/{0}/handlers".format(topic_id)
}
@view_config(route_name='alerts_configuration', request_method='DELETE')
def delete_alerts_specification(self):
"""
The view for deleting alerts based on a TOSCA alerts specification document.
:return: a dictionary with the deleted alerts and handlers
:raises HTTPBadRequest: if the request doesn't contain a (YAML) file input referenced as alert-spec representing the TOSCA Alerts Specification
"""
kapacitor_host, kapacitor_port = self.request.registry.settings['kapacitor_host'], self.request.registry.settings['kapacitor_port']
sfemc_fqdn, sfemc_port = self.request.registry.settings['sfemc_fqdn'], self.request.registry.settings['sfemc_port']
alert_spec_reference = self.request.params.get('alert-spec')
# check that the alerts specification file was sent
if not hasattr(alert_spec_reference, "file") or not hasattr(alert_spec_reference, "filename"):
raise HTTPBadRequest("Request to this API endpoint must include a (YAML) file input referenced as 'alert-spec' representing the TOSCA Alert Specification.")
# extract alert specification file and filename
alerts_input_filename = alert_spec_reference.filename
alerts_input_file = alert_spec_reference.file
if not (alerts_input_filename.lower().endswith('.yaml') or alerts_input_filename.lower().endswith('.yml')):
raise HTTPBadRequest("Request to this API endpoint must include a (YAML) file input referenced as 'alert-spec' representing the TOSCA Alerts Specification.")
# parse the alerts specification file
try:
alerts_yaml_content = load(alerts_input_file)
adjust_tosca_definitions_import(alerts_yaml_content)
except YAMLError as err:
log.error("Couldn't parse user request file {0} to yaml format due to error: {1}".format(alerts_input_filename, err))
log.error("Invalid content is: {0}".format(alerts_input_file.read()))
raise HTTPBadRequest("Request alert specification file could not be parsed as valid YAML document.")
try:
tosca_tpl = ToscaTemplate(yaml_dict_tpl=alerts_yaml_content)
except Exception as e:
log.error(e)
raise HTTPBadRequest("Request alert specification file could not be parsed as a valid TOSCA document.")
valid_alert_spec = validate_clmc_alerts_specification(tosca_tpl.tpl)
if not valid_alert_spec:
raise HTTPBadRequest("Request alert specification file could not be validated as a CLMC TOSCA alerts specification document.")
# TODO next release - uncomment
# sfc, sfc_instance = tosca_tpl.tpl["metadata"]["sfc"], tosca_tpl.tpl["metadata"]["sfci"]
sfc = tosca_tpl.tpl["metadata"]["servicefunctionchain"]
sfc_instance = "{0}_1".format(sfc)
alerts = []
handlers = []
for policy in tosca_tpl.policies:
for trigger in policy.triggers:
event_id = trigger.name
policy_id = policy.name
# generate topic and alert identifiers
topic_id = "{0}\n{1}\n{2}\n{3}".format(sfc, sfc_instance, policy_id, event_id) # scoped per service function chain instance (no two sfc instances report to the same topic)
topic_id = self.get_hash(topic_id)
task_id = topic_id
# delete alert task
kapacitor_api_task_url = "http://{0}:{1}/kapacitor/v1/tasks/{2}".format(kapacitor_host, kapacitor_port, task_id)
if get(kapacitor_api_task_url).status_code == 200:
delete(kapacitor_api_task_url)
alerts.append({"policy": policy_id, "trigger": event_id})
# get all alert handlers
kapacitor_api_topic_handlers_url = "http://{0}:{1}/kapacitor/v1/alerts/topics/{2}/handlers".format(kapacitor_host, kapacitor_port, topic_id)
http_response = get(kapacitor_api_topic_handlers_url)
if http_response.status_code != 200:
continue # if the topic doesn't exist continue with the other triggers
# delete alert handlers
http_handlers = http_response.json()['handlers']
for handler in http_handlers:
original_http_handler_url = handler["options"]["url"]
if original_http_handler_url.startswith("http://{0}:{1}/".format(sfemc_fqdn, sfemc_port)):
original_http_handler_url = SFEMC
handler_id = handler["id"]
kapacitor_api_handler_url = "http://{0}:{1}/kapacitor/v1/alerts/topics/{2}/handlers/{3}".format(kapacitor_host, kapacitor_port, topic_id, handler_id)
delete(kapacitor_api_handler_url)
handlers.append({"policy": policy_id, "trigger": event_id, "handler": original_http_handler_url})
# delete alert topic
kapacitor_api_topic_url = "http://{0}:{1}/kapacitor/v1/alerts/topics/{2}".format(kapacitor_host, kapacitor_port, topic_id)
delete(kapacitor_api_topic_url)
return {"deleted_alerts": alerts, "deleted_handlers": handlers}
@view_config(route_name='alerts_configuration', request_method='POST')
def post_alerts_specification(self):
"""
The view for receiving and configuring alerts based on the TOSCA alerts specification document. This endpoint must also receive the TOSCA resources specification document for validation.
:return: a dictionary with a msg and optional keys for errors encountered while interacting with Kapacitor
:raises HTTPBadRequest: if the request doesn't contain a (YAML) file input referenced as alert-spec representing the TOSCA Alerts Specification
"""
@@ -106,7 +203,7 @@ class AlertsConfigurationAPI(object):
raise HTTPBadRequest("Request to this API endpoint must include a (YAML) file input referenced as 'resource-spec' representing the TOSCA Resource Specification.")
try:
resource_spec_sfc, resource_spec_sfc_i, resource_spec_policy_triggers = get_resource_spec_policy_triggers(resource_spec_reference)
resource_spec_sfc, resource_spec_sfc_instance, resource_spec_policy_triggers = get_resource_spec_policy_triggers(resource_spec_reference)
except Exception as e:
log.error("Couldn't extract resource specification event IDs due to error: {0}".format(e))
raise HTTPBadRequest("Couldn't extract resource specification event IDs - invalid TOSCA resource specification.")
@@ -119,7 +216,7 @@ class AlertsConfigurationAPI(object):
alerts_input_filename = alert_spec_reference.filename
alerts_input_file = alert_spec_reference.file
if not alerts_input_filename.lower().endswith('.yaml'):
if not (alerts_input_filename.lower().endswith('.yaml') or alerts_input_filename.lower().endswith('.yml')):
raise HTTPBadRequest("Request to this API endpoint must include a (YAML) file input referenced as 'alert-spec' representing the TOSCA Alerts Specification.")
# parse the alerts specification file
@@ -148,7 +245,7 @@ class AlertsConfigurationAPI(object):
sfc_instance = "{0}_1".format(sfc)
# do validation between the two TOSCA documents
self._compare_alert_and_resource_spec(sfc, sfc_instance, alert_spec_policy_triggers, resource_spec_sfc, resource_spec_sfc_i, resource_spec_policy_triggers)
self._compare_alert_and_resource_spec(sfc, sfc_instance, alert_spec_policy_triggers, resource_spec_sfc, resource_spec_sfc_instance, resource_spec_policy_triggers)
db = sfc # database per service function chain, named after the service function chain ID
@@ -218,10 +315,10 @@ class AlertsConfigurationAPI(object):
:param resource_spec_policy_triggers: the extracted policy-trigger strings from the resource specification
:param alert_tasks_errors: the list for tracking errors while interacting with Kapacitor tasks
:param alert_handlers_errors: the list for tracking errors while interacting with Kapacitor alert handlers
:return: the list of successfully registered event identifiers
"""
kapacitor_api_tasks_url = "http://{0}:{1}/kapacitor/v1/tasks".format(kapacitor_host, kapacitor_port)
for policy in tosca_tpl.policies:
for trigger in policy.triggers:
event_id = trigger.name
@@ -255,7 +352,7 @@ class AlertsConfigurationAPI(object):
# generate topic and alert identifiers
topic_id = "{0}\n{1}\n{2}\n{3}".format(sfc, sfc_instance, policy_id, event_id) # scoped per service function chain instance (no two sfc instances report to the same topic)
topic_id = self.get_hash(topic_id)
alert_id = topic_id
task_id = topic_id
# check whether the template needs to be a stream or a batch
if event_type in self.DUAL_VERSION_TEMPLATES:
@@ -273,9 +370,8 @@ class AlertsConfigurationAPI(object):
alert_period=alert_period, topic_id=topic_id, event_id=event_id, where_clause=where_clause)
# create and activate alert task through the kapacitor HTTP API
kapacitor_api_tasks_url = "http://{0}:{1}/kapacitor/v1/tasks".format(kapacitor_host, kapacitor_port)
kapacitor_http_request_body = {
"id": alert_id,
"id": task_id,
"template-id": template_id,
"dbrps": [{"db": db, "rp": "autogen"}],
"status": "enabled",
@@ -321,6 +417,7 @@ class AlertsConfigurationAPI(object):
"""
kapacitor_api_handlers_url = "http://{0}:{1}/kapacitor/v1/alerts/topics/{2}/handlers".format(kapacitor_host, kapacitor_port, topic_id)
for http_handler_url in http_handlers:
# check for flame_sfemc entry, if found replace with sfemc FQDN
@@ -89,19 +89,3 @@ def set_up_tear_down_fixture(rspec_config):
kill(process_id, SIGKILL)
if exists(LOG_TEST_FOLDER_PATH):
rmtree(LOG_TEST_FOLDER_PATH)
print("Deleting Kapacitor tasks, topics and handlers that were created for this test...")
# get all tasks from kapacitor (that were created in this test) and delete them
kapacitor_tasks = get("{0}/kapacitor/v1/tasks".format(kapacitor_url)).json()["tasks"]
kapacitor_task_links = [task["link"]["href"] for task in kapacitor_tasks]
for task_link in kapacitor_task_links:
delete("{0}{1}".format(kapacitor_url, task_link))
# get all topics and handlers from kapacitor (that were created in this test) and delete them
kapacitor_topics = get("{0}/kapacitor/v1/alerts/topics".format(kapacitor_url)).json()["topics"]
for topic in kapacitor_topics:
topic_handlers = get("{0}{1}".format(kapacitor_url, topic["handlers-link"]["href"])).json()["handlers"]
for handler in topic_handlers:
delete("{0}{1}".format(kapacitor_url, handler["link"]["href"]))
delete("{0}{1}".format(kapacitor_url, topic["link"]["href"]))
@@ -23,7 +23,7 @@
"""
from time import sleep, strptime
from requests import post, get
from requests import post, get, delete
from os import listdir
from os.path import join, dirname
from json import load
@@ -149,3 +149,24 @@ class TestAlerts(object):
valid = False
assert valid, "Alert log content is invalid - {0}".format(alert_log_path)
with open(alerts_spec, 'rb') as alerts:
files = {'alert-spec': alerts}
response = delete("http://{0}/clmc-service/alerts".format(clmc_service_host), files=files)
assert response.status_code == 200, "Incorrect status code returned after deleting the alert specification"
json_response = response.json()
# sort by trigger to ensure comparison order is correct
assert sorted(json_response["deleted_alerts"], key=lambda x: x['trigger']) == [{"policy": "scale_nginx_policy", "trigger": "high_requests"}, {"policy": "scale_nginx_policy", "trigger": "increase_in_active_requests"},
{"policy": "scale_nginx_policy", "trigger": "increase_in_running_processes"}, {"policy": "deadman_policy", "trigger": "no_measurements"}], \
"Incorrect list of deleted alerts"
# sort by handler and trigger to ensure comparison order is correct
assert sorted(json_response["deleted_handlers"], key=lambda x: (x['handler'], x['trigger'])) == [{"policy": "scale_nginx_policy", "trigger": "increase_in_active_requests", "handler": "flame_sfemc"},
{"policy": "scale_nginx_policy", "trigger": "increase_in_running_processes", "handler": "flame_sfemc"},
{"policy": "deadman_policy", "trigger": "no_measurements", "handler": "flame_sfemc"},
{"policy": "scale_nginx_policy", "trigger": "high_requests", "handler": "http://172.40.231.200:9999/"},
{"policy": "scale_nginx_policy", "trigger": "increase_in_active_requests", "handler": "http://172.40.231.200:9999/"},
{"policy": "scale_nginx_policy", "trigger": "increase_in_running_processes", "handler": "http://172.40.231.200:9999/"},
{"policy": "deadman_policy", "trigger": "no_measurements", "handler": "http://172.40.231.200:9999/"}], \
"Incorrect list of deleted handlers"