From 69f50c74a5457e96f75537331ca8a85c036d26c9 Mon Sep 17 00:00:00 2001
From: Nikolay Stanchev <ns17@it-innovation.soton.ac.uk>
Date: Wed, 29 Aug 2018 15:25:00 +0100
Subject: [PATCH] Updates alerts API to use newline concatenation and hashed
 identifier values, updates tests to reflect this, implements and tests
 validation against the TOSCA resource spec.

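Topic, alert and handler identifiers are now built by joining their
components (sfc, sfc instance, policy and trigger/handler host) with new
line characters and hashing the result with SHA-256; the alert ID reuses
the topic ID. A minimal sketch of the derivation (the identifier values
below are illustrative examples taken from the test data):

    from hashlib import sha256

    def get_hash(message):
        # same logic as AlertsConfigurationAPI.get_hash
        return sha256(bytes(message, encoding="utf-8")).hexdigest()

    sfc, sfc_i = "companyA-VR", "companyA-VR-premium"
    policy_id, trigger_id = "high_latency_policy", "high_latency"
    handler_host = "sfemc.flame.eu"

    topic_id = get_hash("\n".join([sfc, sfc_i, policy_id, trigger_id]))
    alert_id = topic_id
    handler_id = get_hash("\n".join([sfc, sfc_i, policy_id, trigger_id, handler_host]))

The alerts API now also expects a 'resource-spec' file in the POST request
and returns an HTTP 400 error when the resource specification is
inconsistent with the alert specification (different sfc/sfci identifiers,
or policy triggers with a "clmc" source that have no matching trigger in
the alert specification).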
---
 src/service/clmcservice/alertsapi/tests.py    | 150 ++++++++-------
 .../clmcservice/alertsapi/utilities.py        |  57 ++++++
 src/service/clmcservice/alertsapi/views.py    |  79 +++++++-
 .../valid/alerts_test_config-1.yaml           |  58 +-----
 .../valid/alerts_test_config-2.yaml           |  60 ++----
 .../resources_invalid_test_config-1.yaml      | 159 ++++++++++++++++
 .../resources_invalid_test_config-2.yaml      | 119 ++++++++++++
 .../resources_invalid_test_config-3.yaml      | 157 ++++++++++++++++
 .../resources_invalid_test_config-4.yaml      | 174 ++++++++++++++++++
 .../resources_invalid_test_config-5.yaml      | 174 ++++++++++++++++++
 .../resources_valid_test_config-1.yaml        | 159 ++++++++++++++++
 .../resources_valid_test_config-2.yaml        | 119 ++++++++++++
 .../resources_valid_test_config-3.yaml        | 157 ++++++++++++++++
 .../resources_valid_test_config-4.yaml        | 174 ++++++++++++++++++
 .../resources_valid_test_config-5.yaml        | 174 ++++++++++++++++++
 15 files changed, 1797 insertions(+), 173 deletions(-)
 create mode 100644 src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-1.yaml
 create mode 100644 src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-2.yaml
 create mode 100644 src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-3.yaml
 create mode 100644 src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-4.yaml
 create mode 100644 src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-5.yaml
 create mode 100644 src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-1.yaml
 create mode 100644 src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-2.yaml
 create mode 100644 src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-3.yaml
 create mode 100644 src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-4.yaml
 create mode 100644 src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-5.yaml

diff --git a/src/service/clmcservice/alertsapi/tests.py b/src/service/clmcservice/alertsapi/tests.py
index f92af20..0f1d489 100644
--- a/src/service/clmcservice/alertsapi/tests.py
+++ b/src/service/clmcservice/alertsapi/tests.py
@@ -30,6 +30,7 @@ from urllib.parse import urlparse
 
 # PIP installed libs
 import pytest
+from pyramid.httpexceptions import HTTPBadRequest
 from yaml import load
 from pyramid import testing
 from requests import get, delete
@@ -82,9 +83,6 @@ class TestAlertsConfigurationAPI(object):
                 if not isfile(alert_config_abs_path):
                     continue  # skip directories
 
-                if not test_file_path.lower().endswith('.yaml'):
-                    continue  # non-yaml files are not intended for being tested
-
                 print(alert_config_abs_path, valid_expected)
 
                 with open(alert_config_abs_path, 'r') as fh:
@@ -113,9 +111,6 @@ class TestAlertsConfigurationAPI(object):
                 if not isfile(alert_config_abs_path):
                     continue  # skip directories
 
-                if not test_file_path.lower().endswith('.yaml'):
-                    continue  # non-yaml files are not intended for being tested
-
                 print(alert_config_abs_path, valid_expected)
 
                 with open(alert_config_abs_path, 'r') as fh:
@@ -140,72 +135,93 @@ class TestAlertsConfigurationAPI(object):
         :param app_config: fixture for setUp/tearDown of the web service registry
         """
 
-        for test_folder in ("clmc-validator", "tosca-parser"):
-            test_data_path = join(dirname(ROOT_DIR), *["resources", "tosca", "test-data", test_folder, "valid"])
-
-            for test_file_path in listdir(test_data_path):
-                alert_spec_abs_path = join(test_data_path, test_file_path)
-
-                if not isfile(alert_spec_abs_path):
-                    continue  # skip directories
-
-                if not test_file_path.lower().endswith('.yaml'):
-                    continue  # non-yaml files are not intended for being tested
-
-                print("Testing file {0} in folder {1}".format(test_file_path, test_folder))
-
-                request = testing.DummyRequest()
-
-                with open(alert_spec_abs_path) as alert_spec:
+        test_folder = "clmc-validator"
+        alerts_test_data_path = join(dirname(ROOT_DIR), *["resources", "tosca", "test-data", test_folder, "valid"])
+        resources_test_data_path = join(dirname(ROOT_DIR), *["resources", "tosca", "test-data", "resource-spec"])
+
+        for alerts_test_file in listdir(alerts_test_data_path):
+            alert_spec_abs_path = join(alerts_test_data_path, alerts_test_file)
+
+            if not isfile(alert_spec_abs_path):
+                continue  # skip directories
+
+            print("Testing file {0} in folder {1}".format(alerts_test_file, test_folder))
+
+            valid_resources_test_file = alerts_test_file.replace("alerts", "resources_valid")
+            invalid_resources_test_file = alerts_test_file.replace("alerts", "resources_invalid")
+            valid_resource_spec_abs_path = join(resources_test_data_path, valid_resources_test_file)
+            invalid_resource_spec_abs_path = join(resources_test_data_path, invalid_resources_test_file)
+
+            print("Test uses resource spec. files {0} and {1}".format(valid_resources_test_file, invalid_resources_test_file))
+
+            with open(alert_spec_abs_path) as alert_spec:
+                # first send an inconsistent resource spec
+                with open(invalid_resource_spec_abs_path) as invalid_resource_spec:
+                    request = testing.DummyRequest()
+                    request.POST['alert-spec'] = FieldStorageMock(alerts_test_file, alert_spec)
+                    request.POST['resource-spec'] = FieldStorageMock(invalid_resources_test_file, invalid_resource_spec)
+                    try:
+                        AlertsConfigurationAPI(request).post_alerts_specification()
+                        assert False, "No error was returned even though an inconsistent resource specification was sent"
+                    except HTTPBadRequest:
+                        pass  # we expect this to happen
+
+                alert_spec.seek(0)
+                # then send a consistent resource spec
+                with open(valid_resource_spec_abs_path) as valid_resource_spec:
+                    request = testing.DummyRequest()
                     sfc, sfc_instance, alert_ids, topic_handlers = extract_alert_spec_data(alert_spec)
                     alert_spec.seek(0)
-                    request.POST['alert-spec'] = FieldStorageMock(test_file_path, alert_spec)  # a simple mock class is used to mimic the FieldStorage class
+                    request.POST['alert-spec'] = FieldStorageMock(alerts_test_file, alert_spec)  # a simple mock class is used to mimic the FieldStorage class
+                    request.POST['resource-spec'] = FieldStorageMock(valid_resources_test_file, valid_resource_spec)
                     clmc_service_response = AlertsConfigurationAPI(request).post_alerts_specification()
 
-                assert (sfc, sfc_instance) == (clmc_service_response["service_function_chain_id"], clmc_service_response["service_function_chain_instance_id"]), \
-                    "Incorrect extraction of metadata for file {0}". format(test_file_path)
-                assert "triggers_specification_errors" not in clmc_service_response, "Unexpected error was returned for triggers specification"
-                assert "triggers_action_errors" not in clmc_service_response, "Unexpected error was returned for handlers specification"
+            assert (sfc, sfc_instance) == (clmc_service_response["service_function_chain_id"], clmc_service_response["service_function_chain_instance_id"]), \
+                "Incorrect extraction of metadata for file {0}". format(alerts_test_file)
+            assert "triggers_specification_errors" not in clmc_service_response, "Unexpected error was returned for triggers specification"
+            assert "triggers_action_errors" not in clmc_service_response, "Unexpected error was returned for handlers specification"
 
-                # traverse through all alert IDs and check that they are created within Kapacitor
-                for alert_id in alert_ids:
-                    kapacitor_response = get("http://localhost:9092/kapacitor/v1/tasks/{0}".format(alert_id))
-                    assert kapacitor_response.status_code == 200, "Alert with ID {0} was not created - test file {1}.".format(alert_id, test_file_path)
-                    kapacitor_response_json = kapacitor_response.json()
-                    assert "link" in kapacitor_response_json, "Incorrect response from kapacitor for alert with ID {0} - test file {1}".format(alert_id, test_file_path)
-                    assert kapacitor_response_json["status"] == "enabled", "Alert with ID {0} was created but is disabled - test file {1}".format(alert_id, test_file_path)
-                    assert kapacitor_response_json["executing"], "Alert with ID {0} was created and is enabled, but is not executing - test file {1}".format(alert_id, test_file_path)
-
-                # check that all topic IDs were registered within Kapacitor
-                topic_ids = list(topic_handlers.keys())
-                kapacitor_response = get("http://localhost:9092/kapacitor/v1/alerts/topics")
-                assert kapacitor_response.status_code == 200, "Kapacitor couldn't return the list of created topics - test file {0}".format(test_file_path)
+            # traverse through all alert IDs and check that they are created within Kapacitor
+            for alert_id in alert_ids:
+                kapacitor_response = get("http://localhost:9092/kapacitor/v1/tasks/{0}".format(alert_id))
+                assert kapacitor_response.status_code == 200, "Alert with ID {0} was not created - test file {1}.".format(alert_id, alerts_test_file)
                 kapacitor_response_json = kapacitor_response.json()
-                kapacitor_defined_topics = [topic["id"] for topic in kapacitor_response_json["topics"]]
-                assert set(topic_ids).issubset(kapacitor_defined_topics), "Not all topic IDs were created within kapacitor - test file {0}".format(test_file_path)
-
-                # check that all handler IDs were created and each of them is subscribed to the correct topic ID
-                for topic_id in topic_handlers:
-                    for handler_id, handler_url in topic_handlers[topic_id]:
-                        kapacitor_response = get("http://localhost:9092/kapacitor/v1/alerts/topics/{0}/handlers/{1}".format(topic_id, handler_id))
-                        assert kapacitor_response.status_code == 200, "Handler with ID {0} for topic with ID {1} doesn't exist - test file {2}".format(handler_id, topic_id, test_file_path)
-                        kapacitor_response_json = kapacitor_response.json()
-                        assert kapacitor_response_json["id"] == handler_id, "Incorrect ID of handler {0} in the Kapacitor response - test file {1}".format(handler_id, test_file_path)
-                        assert kapacitor_response_json["kind"] == "post", "Incorrect kind of handler {0} in the Kapacitor response - test file {1}".format(handler_id, test_file_path)
-                        assert kapacitor_response_json["options"]["url"], "Incorrect url of handler {0} in the Kapacitor response - test file {1}".format(handler_id, test_file_path)
-
-                # send the same spec again to check that error messages are returned (because of ID duplication)
-                with open(alert_spec_abs_path) as alert_spec:
-                    request.POST['alert-spec'] = FieldStorageMock(test_file_path, alert_spec)  # a simple mock class is used to mimic the FieldStorage class
+                assert "link" in kapacitor_response_json, "Incorrect response from kapacitor for alert with ID {0} - test file {1}".format(alert_id, alerts_test_file)
+                assert kapacitor_response_json["status"] == "enabled", "Alert with ID {0} was created but is disabled - test file {1}".format(alert_id, alerts_test_file)
+                assert kapacitor_response_json["executing"], "Alert with ID {0} was created and is enabled, but is not executing - test file {1}".format(alert_id, alerts_test_file)
+
+            # check that all topic IDs were registered within Kapacitor
+            topic_ids = list(topic_handlers.keys())
+            kapacitor_response = get("http://localhost:9092/kapacitor/v1/alerts/topics")
+            assert kapacitor_response.status_code == 200, "Kapacitor couldn't return the list of created topics - test file {0}".format(alerts_test_file)
+            kapacitor_response_json = kapacitor_response.json()
+            kapacitor_defined_topics = [topic["id"] for topic in kapacitor_response_json["topics"]]
+            assert set(topic_ids).issubset(kapacitor_defined_topics), "Not all topic IDs were created within kapacitor - test file {0}".format(alerts_test_file)
+
+            # check that all handler IDs were created and each of them is subscribed to the correct topic ID
+            for topic_id in topic_handlers:
+                for handler_id, handler_url in topic_handlers[topic_id]:
+                    kapacitor_response = get("http://localhost:9092/kapacitor/v1/alerts/topics/{0}/handlers/{1}".format(topic_id, handler_id))
+                    assert kapacitor_response.status_code == 200, "Handler with ID {0} for topic with ID {1} doesn't exist - test file {2}".format(handler_id, topic_id, alerts_test_file)
+                    kapacitor_response_json = kapacitor_response.json()
+                    assert kapacitor_response_json["id"] == handler_id, "Incorrect ID of handler {0} in the Kapacitor response - test file {1}".format(handler_id, alerts_test_file)
+                    assert kapacitor_response_json["kind"] == "post", "Incorrect kind of handler {0} in the Kapacitor response - test file {1}".format(handler_id, alerts_test_file)
+                    assert kapacitor_response_json["options"]["url"], "Incorrect url of handler {0} in the Kapacitor response - test file {1}".format(handler_id, alerts_test_file)
+
+            # send the same spec again to check that error messages are returned (because of ID duplication)
+            with open(alert_spec_abs_path) as alert_spec:
+                with open(valid_resource_spec_abs_path) as valid_resource_spec:
+                    request.POST['alert-spec'] = FieldStorageMock(alerts_test_file, alert_spec)  # a simple mock class is used to mimic the FieldStorage class
+                    request.POST['resource-spec'] = FieldStorageMock(valid_resources_test_file, valid_resource_spec)
                     clmc_service_response = AlertsConfigurationAPI(request).post_alerts_specification()
-                assert (sfc, sfc_instance) == (clmc_service_response["service_function_chain_id"], clmc_service_response["service_function_chain_instance_id"]), \
-                    "Incorrect extraction of metadata for file {0}". format(test_file_path)
+            assert (sfc, sfc_instance) == (clmc_service_response["service_function_chain_id"], clmc_service_response["service_function_chain_instance_id"]), \
+                "Incorrect extraction of metadata for file {0}". format(alerts_test_file)
 
-                assert len(clmc_service_response["triggers_specification_errors"]) == len(alert_ids), "Expected errors were not returned for triggers specification"
-                handlers_count = sum([len(topic_handlers[topic]) for topic in topic_handlers])
-                assert len(clmc_service_response["triggers_action_errors"]) == handlers_count, "Expected errors were not returned for handlers specification"
+            assert len(clmc_service_response["triggers_specification_errors"]) == len(alert_ids), "Expected errors were not returned for triggers specification"
+            handlers_count = sum([len(topic_handlers[topic]) for topic in topic_handlers])
+            assert len(clmc_service_response["triggers_action_errors"]) == handlers_count, "Expected errors were not returned for handlers specification"
 
-                clear_kapacitor_alerts(alert_ids, topic_handlers)
+            clear_kapacitor_alerts(alert_ids, topic_handlers)
 
 
 class FieldStorageMock(object):
@@ -243,15 +259,17 @@ def extract_alert_spec_data(alert_spec):
         for trigger in policy.triggers:
             trigger_id = trigger.name
 
-            topic_id = "{0}.{1}.{2}".format(sfc, sfc_instance, trigger_id)
+            topic_id = "{0}\n{1}\n{2}\n{3}".format(sfc, sfc_instance, policy_id, trigger_id)
+            topic_id = AlertsConfigurationAPI.get_hash(topic_id)
             topic_handlers[topic_id] = []
 
-            alert_id = "{0}.{1}.{2}.{3}".format(sfc, sfc_instance, policy_id, trigger_id)
+            alert_id = topic_id
             alert_ids.append(alert_id)
 
             for handler_url in trigger.trigger_tpl["action"]["implementation"]:
                 handler_host = urlparse(handler_url).hostname
-                handler_id = "{0}.{1}.{2}".format(policy_id, trigger_id, handler_host)
+                handler_id = "{0}\n{1}\n{2}\n{3}\n{4}".format(sfc, sfc_instance, policy_id, trigger_id, handler_host)
+                handler_id = AlertsConfigurationAPI.get_hash(handler_id)
                 topic_handlers[topic_id].append((handler_id, handler_url))
 
     return sfc, sfc_instance, alert_ids, topic_handlers
diff --git a/src/service/clmcservice/alertsapi/utilities.py b/src/service/clmcservice/alertsapi/utilities.py
index 32ac93a..41890a9 100644
--- a/src/service/clmcservice/alertsapi/utilities.py
+++ b/src/service/clmcservice/alertsapi/utilities.py
@@ -26,6 +26,9 @@
 # Python standard libs
 from os.path import join
 
+# PIP installed libs
+from yaml import load
+
 # CLMC-service imports
 from clmcservice import ROOT_DIR
 
@@ -54,6 +57,60 @@ def adjust_tosca_definitions_import(alert_spec):
         pass  # nothing to replace if the import is not specified (either imports are missed, or no reference to the clmc tosca definitions file)
 
 
+def get_resource_spec_topic_ids(resource_spec_reference):
+    """
+    Tries to extract all event identifiers from a TOSCA resource specification
+
+    :param resource_spec_reference: the resource specification file reference from the POST HTTP request
+
+    :return: sfc ID, sfc instance ID and the list of topic IDs
+    """
+
+    resource_spec = load(resource_spec_reference.file)
+
+    topic_ids = []
+    sfc, sfc_i = resource_spec["metadata"]["sfc"], resource_spec["metadata"]["sfci"]
+
+    policies = resource_spec["topology_template"]["policies"]
+    for policy in policies:
+        policy = list(policy.items())[0]
+        policy_id, policy_object = policy[0], policy[1]
+
+        if policy_object["type"] == "eu.ict-flame.policies.StateChange":
+            triggers = policy_object["triggers"]
+
+            for trigger in triggers.values():
+                event = trigger["condition"]["constraint"]
+                source, event_id = event.split("::")
+                if source.lower() == "clmc":  # only take those event IDs that have clmc set as their source
+                    topic_ids.append("{0}\n{1}".format(policy_id, event_id))
+
+    return sfc, sfc_i, topic_ids
+
+
+def get_alert_spec_topic_ids(alerts_spec_tpl):
+    """
+    Tries to extract all event identifiers from a TOSCA alerts specification
+
+    :param alerts_spec_tpl: the alerts specification TOSCA template object
+
+    :return: the list of topic IDs
+    """
+
+    topic_ids = []
+
+    for policy in alerts_spec_tpl.policies:
+        policy_id = policy.name
+
+        for trigger in policy.triggers:
+            trigger_id = trigger.name
+
+            topic_id = "{0}\n{1}".format(policy_id, trigger_id)
+            topic_ids.append(topic_id)
+
+    return topic_ids
+
+
 def fill_http_post_handler_vars(handler_id, handler_url):
     """
     Creates a dictionary object ready to be posted to kapacitor to create an alert handler.
diff --git a/src/service/clmcservice/alertsapi/views.py b/src/service/clmcservice/alertsapi/views.py
index 19957f4..09cf313 100644
--- a/src/service/clmcservice/alertsapi/views.py
+++ b/src/service/clmcservice/alertsapi/views.py
@@ -25,6 +25,7 @@
 # Python standard libs
 import logging
 from urllib.parse import urlparse
+from hashlib import sha256
 
 # PIP installed libs
 from pyramid.httpexceptions import HTTPBadRequest
@@ -34,7 +35,7 @@ from toscaparser.tosca_template import ToscaTemplate
 from requests import post
 
 # CLMC-service imports
-from clmcservice.alertsapi.utilities import adjust_tosca_definitions_import, TICKScriptTemplateFiller, fill_http_post_handler_vars
+from clmcservice.alertsapi.utilities import adjust_tosca_definitions_import, TICKScriptTemplateFiller, fill_http_post_handler_vars, get_resource_spec_topic_ids, get_alert_spec_topic_ids
 from clmcservice.alertsapi.alerts_specification_schema import COMPARISON_OPERATORS,  validate_clmc_alerts_specification
 
 # initialise logger
@@ -67,6 +68,12 @@ class AlertsConfigurationAPI(object):
         kapacitor_host, kapacitor_port = self.request.registry.settings['kapacitor_host'], self.request.registry.settings['kapacitor_port']
 
         alert_spec_reference = self.request.POST.get('alert-spec')
+        resource_spec_reference = self.request.POST.get('resource-spec')
+        try:
+            resource_spec_sfc, resource_spec_sfc_i, resource_spec_topic_ids = get_resource_spec_topic_ids(resource_spec_reference)
+        except Exception as e:
+            log.error("Couldn't extract resource specification event IDs due to error: {0}".format(e))
+            raise HTTPBadRequest("Couldn't extract resource specification event IDs - invalid TOSCA resource specification.")
 
         # check that the specification file was sent
         if not hasattr(alert_spec_reference, "file") or not hasattr(alert_spec_reference, "filename"):
@@ -98,9 +105,13 @@ class AlertsConfigurationAPI(object):
         if not valid_alert_spec:
             raise HTTPBadRequest("Request alert specification file could not be validated as a CLMC TOSCA alerts specification document.")
 
+        alert_spec_topic_ids = get_alert_spec_topic_ids(tosca_tpl)
         sfc, sfc_instance = tosca_tpl.tpl["metadata"]["sfc"], tosca_tpl.tpl["metadata"]["sfci"]
-        db = sfc  # ASSUMPTION: database per service function chain, named after the service function chain ID
 
+        # do validation between the two TOSCA documents
+        self._compare_alert_and_resource_spec(sfc, sfc_instance, alert_spec_topic_ids, resource_spec_sfc, resource_spec_sfc_i, resource_spec_topic_ids)
+
+        db = sfc  # ASSUMPTION: database per service function chain, named after the service function chain ID
         # two lists to keep track of any errors while interacting with the Kapacitor HTTP API
         alert_tasks_errors = []
         alert_handlers_errors = []
@@ -119,6 +130,33 @@ class AlertsConfigurationAPI(object):
 
         return return_msg
 
+    def _compare_alert_and_resource_spec(self, alert_spec_sfc, alert_spec_sfc_instance, alert_spec_topics, resource_spec_sfc, resource_spec_sfc_instance, resource_spec_topics):
+        """
+        Compares the extracted values from the resource spec against the values from the alert spec - validates that both documents refer to the same service function chain, instance and triggers.
+
+        :param alert_spec_sfc: sfc from alert spec
+        :param alert_spec_sfc_instance: sfc instance from alert spec
+        :param alert_spec_topics: policy/trigger IDs from alert spec
+        :param resource_spec_sfc: sfc from resource spec
+        :param resource_spec_sfc_instance: sfc instance from resource spec
+        :param resource_spec_topics: policy/trigger IDs from resource spec
+
+        :raises: HTTP Bad Request if the two specifications are inconsistent
+        """
+
+        if alert_spec_sfc != resource_spec_sfc:
+            raise HTTPBadRequest("Different service function chain ID used in the alert and resource specification documents: {0} != {1}".format(alert_spec_sfc, resource_spec_sfc))
+
+        if alert_spec_sfc_instance != resource_spec_sfc_instance:
+            raise HTTPBadRequest("Different service function chain instance ID used in the alert and resource specification documents: {0} != {1}".format(alert_spec_sfc_instance, resource_spec_sfc_instance))
+
+        alert_spec_topics_set = set(alert_spec_topics)
+        missing_topic_ids = [topic_id for topic_id in resource_spec_topics if topic_id not in alert_spec_topics_set]
+
+        if len(missing_topic_ids) > 0:
+            missing_topic_ids = [topic_id.replace("\n", " : ") for topic_id in missing_topic_ids]
+            raise HTTPBadRequest("Couldn't match the following policy triggers from the resource specification with triggers defined in the alerts specification: {0}".format(missing_topic_ids))
+
     def _config_kapacitor_alerts(self, tosca_tpl, sfc, sfc_instance, db, kapacitor_host, kapacitor_port, alert_tasks_errors, alert_handlers_errors):
         """
         Configures the alerts task and alert handlers within Kapacitor.
@@ -131,11 +169,15 @@ class AlertsConfigurationAPI(object):
         :param kapacitor_port: default value to use is 9092
         :param alert_tasks_errors: the list for tracking errors while interacting with Kapacitor tasks
         :param alert_handlers_errors: the list for tracking errors while interacting with Kapacitor alert handlers
+
+        :return: the list of successfully registered event identifiers
         """
 
         for policy in tosca_tpl.policies:
             for trigger in policy.triggers:
                 event_id = trigger.name
+                policy_id = policy.name
+
                 event_type = trigger.trigger_tpl["event_type"]
                 template_id = "{0}-template".format(event_type)
                 measurement, field = trigger.trigger_tpl["metric"].split(".")
@@ -150,7 +192,7 @@ class AlertsConfigurationAPI(object):
                 if "resource_type" in trigger.trigger_tpl["condition"]:
                     tags = condition["resource_type"]
                     # make sure alert tasks are executing with queries for the given sfc and sfc instance
-                    # tags["sfc"] = sfc TODO uncomment this line when we updated telegraf to name db after sfc
+                    # tags["sfc"] = sfc TODO uncomment this line when we update telegraf to name db after sfc
                     # tags["sfci"] = sfc_instance TODO uncomment this line when telegraf global tags are updated, currently we have sfc_i instead of sfci
 
                     # NOTE: if the template has its where clause defined as lambda (stream templates), then use "==" as comparison operator,
@@ -163,8 +205,9 @@ class AlertsConfigurationAPI(object):
                 comparison_operator = COMPARISON_OPERATORS[condition.get("comparison_operator", "gte")]  # if not specified, use "gte" (>=)
 
                 # generate topic and alert identifiers
-                topic_id = "{0}.{1}.{2}".format(sfc, sfc_instance, event_id)  # scoped per service function chain instance (no two sfc instances report to the same topic)
-                alert_id = "{0}.{1}.{2}.{3}".format(sfc, sfc_instance, policy.name, event_id)
+                topic_id = "{0}\n{1}\n{2}\n{3}".format(sfc, sfc_instance, policy_id, event_id)  # scoped per service function chain instance (no two sfc instances report to the same topic)
+                topic_id = self.get_hash(topic_id)
+                alert_id = topic_id
 
                 # built up the template vars dictionary depending on the event type (threshold, relative, etc.)
                 # all extracted properties from the trigger are passed, the TICKScriptTemplateFiller entry point then forwards those to the appropriate function for template filling
@@ -191,7 +234,7 @@ class AlertsConfigurationAPI(object):
                 # track all reported errors
                 if response_content.get("error", "") != "":
                     alert_tasks_errors.append({
-                        "policy": policy.name,
+                        "policy": policy_id,
                         "trigger": event_id,
                         "error": response_content.get("error")
                     })
@@ -200,14 +243,16 @@ class AlertsConfigurationAPI(object):
                 http_handlers = trigger.trigger_tpl["action"]["implementation"]
 
                 # subscribe all http handlers to the created topic
-                self._config_kapacitor_alert_handlers(kapacitor_host, kapacitor_port, policy.name, topic_id, event_id, http_handlers, alert_handlers_errors)
+                self._config_kapacitor_alert_handlers(kapacitor_host, kapacitor_port, sfc, sfc_instance, policy_id, topic_id, event_id, http_handlers, alert_handlers_errors)
 
-    def _config_kapacitor_alert_handlers(self, kapacitor_host, kapacitor_port, policy_id, topic_id, event_id, http_handlers, alert_handlers_errors):
+    def _config_kapacitor_alert_handlers(self, kapacitor_host, kapacitor_port, sfc, sfc_i, policy_id, topic_id, event_id, http_handlers, alert_handlers_errors):
         """
         Handles the configuration of HTTP Post alert handlers.
 
         :param kapacitor_host: default host is localhost (CLMC service running on the same machine as Kapacitor)
         :param kapacitor_port: default value to use is 9092
+        :param sfc: service function chain identifier
+        :param sfc_i: service function chain instance identifier
         :param policy_id: policy ID those triggers relate to
         :param topic_id: topic ID built of sfc, sfc instance and event_id
         :param event_id: name of trigger
@@ -218,7 +263,8 @@ class AlertsConfigurationAPI(object):
         kapacitor_api_handlers_url = "http://{0}:{1}/kapacitor/v1/alerts/topics/{2}/handlers".format(kapacitor_host, kapacitor_port, topic_id)
         for http_handler_url in http_handlers:
             http_handler_host = urlparse(http_handler_url).hostname
-            handler_id = "{0}.{1}.{2}".format(policy_id, event_id, http_handler_host)
+            handler_id = "{0}\n{1}\n{2}\n{3}\n{4}".format(sfc, sfc_i, policy_id, event_id, http_handler_host)
+            handler_id = self.get_hash(handler_id)
             kapacitor_http_request_body = fill_http_post_handler_vars(handler_id, http_handler_url)
             response = post(kapacitor_api_handlers_url, json=kapacitor_http_request_body)
             response_content = response.json()
@@ -231,3 +277,18 @@ class AlertsConfigurationAPI(object):
                     "handler": http_handler_url,
                     "error": response_content.get("error")
                 })
+
+    @staticmethod
+    def get_hash(message):
+        """
+        Returns the SHA-256 hash of a message encoded with UTF-8.
+
+        :param message: the message to hash
+
+        :return: the hexadecimal representation of the hash value
+        """
+
+        byte_str = bytes(message, encoding="utf-8")
+        hash_obj = sha256(byte_str)
+
+        return hash_obj.hexdigest()
diff --git a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-1.yaml b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-1.yaml
index 74391ed..b11c672 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-1.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-1.yaml
@@ -52,60 +52,4 @@ topology_template:
             action:
               implementation:
                 - http://sfemc.flame.eu/notify
-                - http://companyA.alert-handler.flame.eu/low-requests
-
-    - requests_diff_policy:
-        type: eu.ict-flame.policies.StateChange
-        triggers:
-          increase_in_requests:
-            description: |
-              This event triggers when the number of requests has increased relative to the number of requests received
-              120 seconds ago.
-            event_type: relative
-            metric: storage.requests
-            condition:
-              threshold: 100  # requests have increased by at least 100
-              granularity: 120
-              resource_type:
-                sf_package: storage
-                sf: storage-users
-                location: watershed
-              comparison_operator: gte
-            action:
-              implementation:
-                - http://sfemc.flame.eu/notify
-          decrease_in_requests:
-            description: |
-              This event triggers when the number of requests has decreased relative to the number of requests received
-              120 seconds ago.
-            event_type: relative
-            metric: storage.requests
-            condition:
-              threshold: -100  # requests have decreased by at least 100
-              granularity: 120
-              resource_type:
-                sf_package: storage
-                sf: storage-users
-                location: watershed
-              comparison_operator: lte
-            action:
-              implementation:
-                - http://sfemc.flame.eu/notify
-
-    - missing_measurement_policy:
-        type: eu.ict-flame.policies.StateChange
-        triggers:
-          missing_storage_measurements:
-            description: This event triggers when the number of storage measurements reported falls below the threshold value.
-            event_type: deadman
-            # deadman trigger instances monitor the whole measurement (storage in this case), so simply put a star for field value
-            # to be compliant with the <measurement>.<field> format
-            metric: storage.*
-            condition:
-              threshold: 0  # if requests are less than or equal to 0 (in other words, no measurements are reported)
-              granularity: 60  # check for for missing data for the last 60 seconds
-              resource_type:
-                sf_package: storage
-            action:
-              implementation:
-                - http://sfemc.flame.eu/notify
\ No newline at end of file
+                - http://companyA.alert-handler.flame.eu/low-requests
\ No newline at end of file
diff --git a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-2.yaml b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-2.yaml
index ed9306d..0d2e1e1 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-2.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-2.yaml
@@ -12,65 +12,43 @@ metadata:
 topology_template:
 
   policies:
-    - high_latency_policy:
+    - requests_diff_policy:
         type: eu.ict-flame.policies.StateChange
         triggers:
-          high_latency:
-            description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
-            event_type: threshold
-            metric: network.latency
+          decrease_in_requests:
+            description: |
+              This event triggers when the number of requests has decreased relative to the number of requests received
+              120 seconds ago.
+            event_type: relative
+            metric: storage.requests
             condition:
-              threshold: 45
+              threshold: -100  # requests have decreased by at least 100
               granularity: 120
-              aggregation_method: mean
               resource_type:
                 sfc: companyA-VR  # sfc tag is also allowed, even though it is already included in the metadata
                 sfci: companyA-VR-premium # sfci tag is also allowed, even though it is already included in the metadata
-                location: watershed
-              comparison_operator: gt
-            action:
-              implementation:
-                - http://sfemc.flame.eu/notify
-                - http://companyA.alert-handler.flame.eu/high-latency
-    - low_requests_policy:
-        type: eu.ict-flame.policies.StateChange
-        triggers:
-          low_requests:
-            description: |
-              This event triggers when the last reported number of requests for a given service function
-              falls behind a given threshold.
-            event_type: threshold
-            metric: storage.requests
-            condition:
-              threshold: 5
-              granularity: 60
-              aggregation_method: last
-              resource_type:
                 sf_package: storage
                 sf: storage-users
                 location: watershed
-              comparison_operator: lt
+              comparison_operator: lte
             action:
               implementation:
                 - http://sfemc.flame.eu/notify
-                - http://companyA.alert-handler.flame.eu/low-requests
-    - requests_diff_policy:
+
+    - missing_measurement_policy:
         type: eu.ict-flame.policies.StateChange
         triggers:
-          decrease_in_requests:
-            description: |
-              This event triggers when the number of requests has decreased relative to the number of requests received
-              120 seconds ago.
-            event_type: relative
-            metric: storage.requests
+          missing_storage_measurements:
+            description: This event triggers when the number of storage measurements reported falls below the threshold value.
+            event_type: deadman
+            # deadman trigger instances monitor the whole measurement (storage in this case), so simply put a star for field value
+            # to be compliant with the <measurement>.<field> format
+            metric: storage.*
             condition:
-              threshold: -100  # requests have decreased by at least 100
-              granularity: 120
+              threshold: 0  # if requests are less than or equal to 0 (in other words, no measurements are reported)
+              granularity: 60  # check for missing data for the last 60 seconds
               resource_type:
                 sf_package: storage
-                sf: storage-users
-                location: watershed
-              comparison_operator: lte
             action:
               implementation:
                 - http://sfemc.flame.eu/notify
\ No newline at end of file
diff --git a/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-1.yaml b/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-1.yaml
new file mode 100644
index 0000000..abb5ed1
--- /dev/null
+++ b/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-1.yaml
@@ -0,0 +1,159 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+metadata:
+  template_name: Flame Test Tosca resource specification
+  sfc: companyA-VR-ERROR
+  sfci: companyA-VR-premium
+
+# Import own definitions of nodes, capabilities and policy syntax.
+imports:
+  - flame_definitions-0.1.7.yaml
+
+# Starting the template
+
+## Topology
+topology_template:
+  node_templates:
+    database:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            mem_size: 4096 MB
+            disk_size: 10 GB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - db.app.ict-flame.eu
+
+
+    frontend:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            disk_size: 10 GB
+            mem_size: 4096 MB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - frontend.app.ict-flame.eu
+          - www.app.ict-flame.eu
+
+  ## Policies
+  policies:
+    - init:
+        type: eu.ict-flame.policies.InitialPolicy
+        description: Start the nodes initially
+        properties:
+          parent: service_paid
+        triggers:
+          inital_trigger:
+            condition:
+              constraint: initialise
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.connected
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check Latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::high_latency
+              period: 600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check Latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::low_requests
+              period: 600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - cooldown_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Based on time constraint, we are shutting down Bristol and Manchester
+        triggers:
+          time_trigger:
+            description: Disconnect Nodes
+            condition:
+              constraint: unknown::time_frame
+              period: 60 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.booted
+        properties:
+          parent: high_latency_check
+          scheduling:
+            dt_start:
+              TZID: "W. Europe Standard Time"
+              time: 20180324T000000
+            dt_end:
+              tzid: "W. Europe Standard Time"
+              time: 20180324T050000
+            rrule:
+              freq: weekly
+              rule:
+                byday: [mo, tu, we, th, su]
+
+    - service_paid:
+        type: eu.ict-flame.policies.StateChange
+        description: Check outstanding payments. If there are outstanding payments, we shutdown the deployed service.
+        triggers:
+          not_paid_trigger:
+            description: Check if the payment is late
+            condition:
+              constraint: unknown::serviceIsNotPaid
+              period: 3600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.shutdown
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_action:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
diff --git a/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-2.yaml b/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-2.yaml
new file mode 100644
index 0000000..93b04a9
--- /dev/null
+++ b/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-2.yaml
@@ -0,0 +1,119 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+metadata:
+  template_name: Flame Test Tosca resource specification
+  sfc: companyA-VR
+  sfci: companyA-VR-premium-ERROR
+
+# Import own definitions of nodes, capabilities and policy syntax.
+imports:
+  - flame_definitions-0.1.7.yaml
+
+# Starting the template
+
+## Topology
+topology_template:
+  node_templates:
+    database:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            mem_size: 4096 MB
+            disk_size: 10 GB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - db.app.ict-flame.eu
+
+
+    frontend:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            disk_size: 10 GB
+            mem_size: 4096 MB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - frontend.app.ict-flame.eu
+          - www.app.ict-flame.eu
+
+  ## Policies
+  policies:
+    - init:
+        type: eu.ict-flame.policies.InitialPolicy
+        description: Start the nodes initially
+        properties:
+          parent: service_paid
+        triggers:
+          inital_trigger:
+            condition:
+              constraint: initialise
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.connected
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - requests_diff_policy:
+        type: eu.ict-flame.policies.StateChange
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::decrease_in_requests
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - missing_measurement_policy:
+        type: eu.ict-flame.policies.StateChange
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::missing_storage_measurements
+            action:
+              frontend:
+              - fqdn: frontend.app.ict-flame.eu
+                lifecycle_actions:
+                  Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - another_check:
+        type: eu.ict-flame.policies.StateChange
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: unknown::missing_storage_measurements
+            action:
+              frontend:
+              - fqdn: frontend.app.ict-flame.eu
+                lifecycle_actions:
+                  Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+
+
diff --git a/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-3.yaml b/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-3.yaml
new file mode 100644
index 0000000..cd5fea8
--- /dev/null
+++ b/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-3.yaml
@@ -0,0 +1,157 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+metadata:
+  template_name: Flame Test Tosca resource specification
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+# Import own definitions of nodes, capabilities and policy syntax.
+imports:
+  - flame_definitions-0.1.7.yaml
+
+# Starting the template
+
+## Topology
+topology_template:
+  node_templates:
+    database:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            mem_size: 4096 MB
+            disk_size: 10 GB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - db.app.ict-flame.eu
+
+
+    frontend:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            disk_size: 10 GB
+            mem_size: 4096 MB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - frontend.app.ict-flame.eu
+          - www.app.ict-flame.eu
+
+  ## Policies
+  policies:
+    - init:
+        type: eu.ict-flame.policies.InitialPolicy
+        description: Start the nodes initially
+        properties:
+          parent: service_paid
+        triggers:
+          inital_trigger:
+            condition:
+              constraint: initialise
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.connected
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - high_latency_check: # doesn't match alert spec policy name
+        type: eu.ict-flame.policies.StateChange
+        description: Check Latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::high_latency
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check Latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::low_requests_event  # doesn't match alert spec event ID
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - cooldown_policy:
+        type: eu.ict-flame.policies.Update
+        description: Based on time constraint, we are shutting down Bristol and Manchester
+        triggers:
+          time_trigger:
+            description: Disconnect Nodes
+            condition:
+              constraint: unknown::time_frame
+              period: 60 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.booted
+        properties:
+          parent: high_latency_check
+          scheduling:
+            dt_start:
+              TZID: "W. Europe Standard Time"
+              time: 20180324T000000
+            dt_end:
+              tzid: "W. Europe Standard Time"
+              time: 20180324T050000
+            rrule:
+              freq: weekly
+              rule:
+                byday: [mo, tu, we, th, su]
+
+    - service_paid:
+        type: eu.ict-flame.policies.StateChange
+        description: Check outstanding payments. If there are outstanding payments, we shutdown the deployed service.
+        triggers:
+          not_paid_trigger:
+            description: Check if the payment is late
+            condition:
+              constraint: unknown::serviceIsNotPaid
+              period: 3600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.shutdown
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_action:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
diff --git a/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-4.yaml b/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-4.yaml
new file mode 100644
index 0000000..deabe90
--- /dev/null
+++ b/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-4.yaml
@@ -0,0 +1,174 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+metadata:
+  template_name: Flame Test Tosca resource specification
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+# Import own definitions of nodes, capabilities and policy syntax.
+imports:
+  - flame_definitions-0.1.7.yaml
+
+# Starting the template
+
+## Topology
+topology_template:
+  node_templates:
+    database:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            mem_size: 4096 MB
+            disk_size: 10 GB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - db.app.ict-flame.eu
+
+
+    frontend:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            disk_size: 10 GB
+            mem_size: 4096 MB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - frontend.app.ict-flame.eu
+          - www.app.ict-flame.eu
+
+  ## Policies
+  policies:
+    - init:
+        type: eu.ict-flame.policies.InitialPolicy
+        description: Start the nodes initially
+        properties:
+          parent: service_paid
+        triggers:
+          inital_trigger:
+            condition:
+              constraint: initialise
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.connected
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::high_latency
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::low_requests
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - missing_measurement_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::missing_storage_measurements_ID # inconsistent with the respective alert schema
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - cooldown_policy:
+        type: eu.ict-flame.policies.Update
+        description: Based on a time constraint, we are shutting down Bristol and Manchester
+        triggers:
+          time_trigger:
+            description: Disconnect Nodes
+            condition:
+              constraint: unknown::time_frame
+              period: 60 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.booted
+        properties:
+          parent: high_latency_check
+          scheduling:
+            dt_start:
+              TZID: "W. Europe Standard Time"
+              time: 20180324T000000
+            dt_end:
+              tzid: "W. Europe Standard Time"
+              time: 20180324T050000
+            rrule:
+              freq: weekly
+              rule:
+                byday: [mo, tu, we, th, su]
+
+    - service_paid:
+        type: eu.ict-flame.policies.StateChange
+        description: Check outstanding payments. If there are outstanding payments, we shut down the deployed service.
+        triggers:
+          not_paid_trigger:
+            description: Check if the payment is late
+            condition:
+              constraint: unknown::serviceIsNotPaid
+              period: 3600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.shutdown
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_action:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
diff --git a/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-5.yaml b/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-5.yaml
new file mode 100644
index 0000000..20c1e78
--- /dev/null
+++ b/src/service/resources/tosca/test-data/resource-spec/resources_invalid_test_config-5.yaml
@@ -0,0 +1,174 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+metadata:
+  template_name: Flame Test Tosca resource specification
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+# Import own definitions of nodes, capabilities and policy syntax.
+imports:
+  - flame_definitions-0.1.7.yaml
+
+# Starting the template
+
+## Topology
+topology_template:
+  node_templates:
+    database:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            mem_size: 4096 MB
+            disk_size: 10 GB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - db.app.ict-flame.eu
+
+
+    frontend:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            disk_size: 10 GB
+            mem_size: 4096 MB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - frontend.app.ict-flame.eu
+          - www.app.ict-flame.eu
+
+  ## Policies
+  policies:
+    - init:
+        type: eu.ict-flame.policies.InitialPolicy
+        description: Start the nodes initially
+        properties:
+          parent: service_paid
+        triggers:
+          inital_trigger:
+            condition:
+              constraint: initialise
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.connected
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::high_latency
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - low_requests_policy_ID:  # inconsistency with the respective alert specification
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::low_requests
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - requests_diff_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::increase_in_requests
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - cooldown_policy:
+        type: eu.ict-flame.policies.Update
+        description: Based on a time constraint, we are shutting down Bristol and Manchester
+        triggers:
+          time_trigger:
+            description: Disconnect Nodes
+            condition:
+              constraint: unknown::time_frame
+              period: 60 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.booted
+        properties:
+          parent: high_latency_check
+          scheduling:
+            dt_start:
+              TZID: "W. Europe Standard Time"
+              time: 20180324T000000
+            dt_end:
+              tzid: "W. Europe Standard Time"
+              time: 20180324T050000
+            rrule:
+              freq: weekly
+              rule:
+                byday: [mo, tu, we, th, su]
+
+    - service_paid:
+        type: eu.ict-flame.policies.StateChange
+        description: Check outstanding payments. If there are outstanding payments, we shut down the deployed service.
+        triggers:
+          not_paid_trigger:
+            description: Check if the payment is late
+            condition:
+              constraint: unknown::serviceIsNotPaid
+              period: 3600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.shutdown
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_action:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
diff --git a/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-1.yaml b/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-1.yaml
new file mode 100644
index 0000000..ceb6582
--- /dev/null
+++ b/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-1.yaml
@@ -0,0 +1,159 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+metadata:
+  template_name: Flame Test Tosca resource specification
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+# Import own definitions of nodes, capabilities and policy syntax.
+imports:
+  - flame_definitions-0.1.7.yaml
+
+# Starting the template
+
+## Topology
+topology_template:
+  node_templates:
+    database:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            mem_size: 4096 MB
+            disk_size: 10 GB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - db.app.ict-flame.eu
+
+
+    frontend:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            disk_size: 10 GB
+            mem_size: 4096 MB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - frontend.app.ict-flame.eu
+          - www.app.ict-flame.eu
+
+  ## Policies
+  policies:
+    - init:
+        type: eu.ict-flame.policies.InitialPolicy
+        description: Start the nodes initially
+        properties:
+          parent: service_paid
+        triggers:
+          inital_trigger:
+            condition:
+              constraint: initialise
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.connected
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::high_latency
+              period: 600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::low_requests
+              period: 600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - cooldown_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Based on a time constraint, we are shutting down Bristol and Manchester
+        triggers:
+          time_trigger:
+            description: Disconnect Nodes
+            condition:
+              constraint: unknown::time_frame
+              period: 60 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.booted
+        properties:
+          parent: high_latency_check
+          scheduling:
+            dt_start:
+              TZID: "W. Europe Standard Time"
+              time: 20180324T000000
+            dt_end:
+              tzid: "W. Europe Standard Time"
+              time: 20180324T050000
+            rrule:
+              freq: weekly
+              rule:
+                byday: [mo, tu, we, th, su]
+
+    - service_paid:
+        type: eu.ict-flame.policies.StateChange
+        description: Check outstanding payments. If there are outstanding payments, we shut down the deployed service.
+        triggers:
+          not_paid_trigger:
+            description: Check if the payment is late
+            condition:
+              constraint: unknown::serviceIsNotPaid
+              period: 3600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.shutdown
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_action:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
diff --git a/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-2.yaml b/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-2.yaml
new file mode 100644
index 0000000..cda9496
--- /dev/null
+++ b/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-2.yaml
@@ -0,0 +1,119 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+metadata:
+  template_name: Flame Test Tosca resource specification
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+# Import own definitions of nodes, capabilities and policy syntax.
+imports:
+  - flame_definitions-0.1.7.yaml
+
+# Starting the template
+
+## Topology
+topology_template:
+  node_templates:
+    database:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            mem_size: 4096 MB
+            disk_size: 10 GB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - db.app.ict-flame.eu
+
+
+    frontend:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            disk_size: 10 GB
+            mem_size: 4096 MB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - frontend.app.ict-flame.eu
+          - www.app.ict-flame.eu
+
+  ## Policies
+  policies:
+    - init:
+        type: eu.ict-flame.policies.InitialPolicy
+        description: Start the nodes initially
+        properties:
+          parent: service_paid
+        triggers:
+          inital_trigger:
+            condition:
+              constraint: initialise
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.connected
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - requests_diff_policy:
+        type: eu.ict-flame.policies.StateChange
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::decrease_in_requests
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - missing_measurement_policy:
+        type: eu.ict-flame.policies.StateChange
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::missing_storage_measurements
+            action:
+              frontend:
+              - fqdn: frontend.app.ict-flame.eu
+                lifecycle_actions:
+                  Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - another_check:
+        type: eu.ict-flame.policies.StateChange
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: unknown::missing_storage_measurements
+            action:
+              frontend:
+              - fqdn: frontend.app.ict-flame.eu
+                lifecycle_actions:
+                  Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+
+
diff --git a/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-3.yaml b/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-3.yaml
new file mode 100644
index 0000000..6190b25
--- /dev/null
+++ b/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-3.yaml
@@ -0,0 +1,157 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+metadata:
+  template_name: Flame Test Tosca resource specification
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+# Import own definitions of nodes, capabilities and policy syntax.
+imports:
+  - flame_definitions-0.1.7.yaml
+
+# Starting the template
+
+## Topology
+topology_template:
+  node_templates:
+    database:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            mem_size: 4096 MB
+            disk_size: 10 GB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - db.app.ict-flame.eu
+
+
+    frontend:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            disk_size: 10 GB
+            mem_size: 4096 MB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - frontend.app.ict-flame.eu
+          - www.app.ict-flame.eu
+
+  ## Policies
+  policies:
+    - init:
+        type: eu.ict-flame.policies.InitialPolicy
+        description: Start the nodes initially
+        properties:
+          parent: service_paid
+        triggers:
+          inital_trigger:
+            condition:
+              constraint: initialise
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.connected
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::high_latency
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::low_requests
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - cooldown_policy:
+        type: eu.ict-flame.policies.Update
+        description: Based on a time constraint, we are shutting down Bristol and Manchester
+        triggers:
+          time_trigger:
+            description: Disconnect Nodes
+            condition:
+              constraint: unknown::time_frame
+              period: 60 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.booted
+        properties:
+          parent: high_latency_check
+          scheduling:
+            dt_start:
+              TZID: "W. Europe Standard Time"
+              time: 20180324T000000
+            dt_end:
+              tzid: "W. Europe Standard Time"
+              time: 20180324T050000
+            rrule:
+              freq: weekly
+              rule:
+                byday: [mo, tu, we, th, su]
+
+    - service_paid:
+        type: eu.ict-flame.policies.StateChange
+        description: Check outstanding payments. If there are outstanding payments, we shut down the deployed service.
+        triggers:
+          not_paid_trigger:
+            description: Check if the payment is late
+            condition:
+              constraint: unknown::serviceIsNotPaid
+              period: 3600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.shutdown
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_action:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
diff --git a/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-4.yaml b/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-4.yaml
new file mode 100644
index 0000000..fe46de4
--- /dev/null
+++ b/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-4.yaml
@@ -0,0 +1,174 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+metadata:
+  template_name: Flame Test Tosca resource specification
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+# Import own definitions of nodes, capabilities and policy syntax.
+imports:
+  - flame_definitions-0.1.7.yaml
+
+# Starting the template
+
+## Topology
+topology_template:
+  node_templates:
+    database:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            mem_size: 4096 MB
+            disk_size: 10 GB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - db.app.ict-flame.eu
+
+
+    frontend:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            disk_size: 10 GB
+            mem_size: 4096 MB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - frontend.app.ict-flame.eu
+          - www.app.ict-flame.eu
+
+  ## Policies
+  policies:
+    - init:
+        type: eu.ict-flame.policies.InitialPolicy
+        description: Start the nodes initially
+        properties:
+          parent: service_paid
+        triggers:
+          inital_trigger:
+            condition:
+              constraint: initialise
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.connected
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::high_latency
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::low_requests
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - missing_measurement_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::missing_storage_measurements
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - cooldown_policy:
+        type: eu.ict-flame.policies.Update
+        description: Based on a time constraint, we are shutting down Bristol and Manchester
+        triggers:
+          time_trigger:
+            description: Disconnect Nodes
+            condition:
+              constraint: unknown::time_frame
+              period: 60 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.booted
+        properties:
+          parent: high_latency_check
+          scheduling:
+            dt_start:
+              TZID: "W. Europe Standard Time"
+              time: 20180324T000000
+            dt_end:
+              tzid: "W. Europe Standard Time"
+              time: 20180324T050000
+            rrule:
+              freq: weekly
+              rule:
+                byday: [mo, tu, we, th, su]
+
+    - service_paid:
+        type: eu.ict-flame.policies.StateChange
+        description: Check outstanding payments. If there are outstanding payments, we shut down the deployed service.
+        triggers:
+          not_paid_trigger:
+            description: Check if the payment is late
+            condition:
+              constraint: unknown::serviceIsNotPaid
+              period: 3600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.shutdown
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_action:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
diff --git a/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-5.yaml b/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-5.yaml
new file mode 100644
index 0000000..82a200c
--- /dev/null
+++ b/src/service/resources/tosca/test-data/resource-spec/resources_valid_test_config-5.yaml
@@ -0,0 +1,174 @@
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+
+metadata:
+  template_name: Flame Test Tosca resource specification
+  sfc: companyA-VR
+  sfci: companyA-VR-premium
+
+# Import own definitions of nodes, capabilities and policy syntax.
+imports:
+  - flame_definitions-0.1.7.yaml
+
+# Starting the template
+
+## Topology
+topology_template:
+  node_templates:
+    database:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            mem_size: 4096 MB
+            disk_size: 10 GB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - db.app.ict-flame.eu
+
+
+    frontend:
+      type: eu.ict-flame.nodes.ServiceFunction
+      capabilities:
+        host:
+          properties:
+            num_cpus: 2
+            disk_size: 10 GB
+            mem_size: 4096 MB
+      properties:
+        hypervisor: kvm
+        image_url: http://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
+        fqdn:
+          - frontend.app.ict-flame.eu
+          - www.app.ict-flame.eu
+
+  ## Policies
+  policies:
+    - init:
+        type: eu.ict-flame.policies.InitialPolicy
+        description: Start the nodes initially
+        properties:
+          parent: service_paid
+        triggers:
+          inital_trigger:
+            condition:
+              constraint: initialise
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.connected
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - high_latency_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::high_latency
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - low_requests_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::low_requests
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - requests_diff_policy:
+        type: eu.ict-flame.policies.StateChange
+        description: Check latency and perform a connect of another node when the latency is too high.
+        properties:
+          parent: service_paid
+        triggers:
+          check_trigger:
+            description: Check high latency on relationships
+            condition:
+              constraint: clmc::increase_in_requests
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.connected
+
+    - cooldown_policy:
+        type: eu.ict-flame.policies.Update
+        description: Based on a time constraint, we are shutting down Bristol and Manchester
+        triggers:
+          time_trigger:
+            description: Disconnect Nodes
+            condition:
+              constraint: unknown::time_frame
+              period: 60 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.booted
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.booted
+        properties:
+          parent: high_latency_check
+          scheduling:
+            dt_start:
+              TZID: "W. Europe Standard Time"
+              time: 20180324T000000
+            dt_end:
+              tzid: "W. Europe Standard Time"
+              time: 20180324T050000
+            rrule:
+              freq: weekly
+              rule:
+                byday: [mo, tu, we, th, su]
+
+    - service_paid:
+        type: eu.ict-flame.policies.StateChange
+        description: Check outstanding payments. If there are outstanding payments, we shut down the deployed service.
+        triggers:
+          not_paid_trigger:
+            description: Check if the payment is late
+            condition:
+              constraint: unknown::serviceIsNotPaid
+              period: 3600 # integer required, unit: seconds
+            action:
+              frontend:
+                -
+                  fqdn: frontend.app.ict-flame.eu
+                  lifecycle_actions:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Bristol: eu.ict-flame.sfe.state.lifecycle.shutdown
+                    Manchester: eu.ict-flame.sfe.state.lifecycle.shutdown
+              database:
+                -
+                  fqdn: db.app.ict-flame.eu
+                  lifecycle_action:
+                    London: eu.ict-flame.sfe.state.lifecycle.shutdown
-- 
GitLab