diff --git a/docs/clmc-service.md b/docs/clmc-service.md
index e1b21a49f155c68929a5f95766c413fa0bea8d0f..f39c92dc2841f7a02e736ad6ee45e3d99eeeae13 100644
--- a/docs/clmc-service.md
+++ b/docs/clmc-service.md
@@ -36,6 +36,29 @@ It offers different API endpoints such as GraphAPI for calculating round trip ti
 configuration data and Alerts API for creating and subscribing to alerts in Kapacitor. All source code, tests and 
 configuration files of the service can be found in the **src/service** folder.
 
+#### Notes
+* Interacting with *Chronograf* - use ***http://<clmc-host>/chronograf***. You will be asked to enter connection details.
+The only input that you need to edit is the *Connection String* - set it to **http://<clmc-host>:8086** and click the
+**Add Source** button.
+
+* Interacting with *Kapacitor* - the Kapacitor HTTP API documentation can be found here: https://docs.influxdata.com/kapacitor/v1.4/working/api/
+Notice that all of the URL paths provided by Kapacitor are already namespaced using base path ***/kapacitor/v1***. Therefore, no other prefix is required
+when interacting with the Kapacitor application running on the clmc container, e.g.  
+***http://<clmc-host>/kapacitor/v1/tasks***  
+as described in the Kapacitor API reference.
+
+* Interacting with *InfluxDB* - the InfluxDB HTTP API documentation can be found here: https://docs.influxdata.com/influxdb/v1.5/tools/api/
+In order to interact with the InfluxDB application running on the clmc container, prefix all URL paths in the documentation 
+with **/influxdb**, e.g.  
+***http://<clmc-host>/influxdb/query***
+
+* Interacting with *neo4j* - use ***http://<clmc-host>/neo4j/browser/***. This will open the neo4j browser, which lets you
+interact with the graph using Cypher queries (if necessary).
+
+* Interacting with *clmc-service* - the API endpoints listed in the following sections relate to direct interactions with the clmc-service 
+application server (listening on port 9080). If interacting with the clmc container, all of the listed below URIs must be prefixed 
+with **/clmc-service** so that the nginx reverse proxy server (listening on port 80) can forward the requests to the correct application, e.g.  
+***http://<clmc-host>/clmc-service/alerts?sfc={service function chain id}&sfci={service function chain instance id}&policy={policy id}&trigger={trigger id}***.
 
 ## Alerts API Endpoints
 
diff --git a/scripts/clmc-service/nginx.conf b/scripts/clmc-service/nginx.conf
index 3cb407fac3096d2683aa50f5a1b60f32ccaa8342..813ac0c7228b3874fc7775a2b8b0992d729da4dc 100644
--- a/scripts/clmc-service/nginx.conf
+++ b/scripts/clmc-service/nginx.conf
@@ -56,12 +56,15 @@ http {
             proxy_set_header X-Forwarded-Host $server_name;
         }
         location /chronograf {
-            proxy_pass http://127.0.0.1:8888; # No trailing slash - chronograf is configured to include /chronograf in its routes
+            proxy_pass http://127.0.0.1:8888; # No trailing slash - chronograf is configured to include '/chronograf' prefix in its routes
             proxy_redirect off;
             proxy_set_header Host $host;
             proxy_set_header X-Real-IP $remote_addr;
             proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
             proxy_set_header X-Forwarded-Host $server_name;
         }
+        location / {
+            return 404;  # return 404 error for every other URL path - this also overrides the nginx default welcome page
+        }
     }
 }
\ No newline at end of file
diff --git a/src/service/clmcservice/alertsapi/alerts_specification_schema.py b/src/service/clmcservice/alertsapi/alerts_specification_schema.py
index 6f6b1e03b947bfa0f360bc3f3e8cee14decc07f2..e2365bf21704b2b926ee8049badd956fca6330c2 100644
--- a/src/service/clmcservice/alertsapi/alerts_specification_schema.py
+++ b/src/service/clmcservice/alertsapi/alerts_specification_schema.py
@@ -23,22 +23,22 @@
 """
 
 # Python standard libs
-from re import compile, IGNORECASE
+import re
 
 # PIP installed libs
 from schema import Schema, And, Or, Optional, SchemaError
 
-"""
-This module defines the schema objects for the TOSCA Alert Specification:
-
-        * flame_clmc_alerts_definitions.yaml must be the only import
-        * metadata section must be present (with key-value pairs for sfc and sfci)
-        * policies section must be present (under the topology_template node)
-        * each policy must be associated with a triggers node (containing at least 1 trigger)
-        * each policy is of type eu.ict-flame.policies.StateChange or eu.ict-flame.policies.Alert
-        * each trigger must specify event_type, metric, condition, and at least one handler in action/implementation
-        * the condition section must specify threshold, granularity, aggregation_method, comparison_operator
-"""
+
+# This module defines the schema objects for the TOSCA Alert Specification:
+#
+#         * flame_clmc_alerts_definitions.yaml must be the only import
+#         * metadata section must be present (with key-value pairs for sfc and sfci)
+#         * policies section must be present (under the topology_template node)
+#         * each policy must be associated with a triggers node (containing at least 1 trigger)
+#         * each policy is of type eu.ict-flame.policies.StateChange or eu.ict-flame.policies.Alert
+#         * each trigger must specify event_type, metric, condition, and at least one handler in action/implementation
+#         * the condition section must specify threshold, granularity, aggregation_method, comparison_operator
+
 
 # Influx QL functions defined in the documentation https://docs.influxdata.com/influxdb/v1.6/query_language/functions/
 INFLUX_QL_FUNCTIONS = (
@@ -49,17 +49,17 @@ INFLUX_QL_FUNCTIONS = (
 TICK_SCRIPT_TEMPLATES = ("threshold", "relative", "deadman")
 
 # Allowed comparison operators and their logical values
-COMPARISON_OPERATORS = {"lt": "<", "gt": ">", "lte": "<=", "gte": ">=", "eq": "=", "neq": "<>"}
+COMPARISON_OPERATORS = {"lt": "<", "gt": ">", "lte": "<=", "gte": ">=", "eq": "==", "neq": "!="}
 
 # Regular expression for validating http handlers
-URL_REGEX = compile(
+URL_REGEX = re.compile(
     r'^https?://'  # http:// or https://
     r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain, e.g. example.domain.com
     r'localhost|'  # or localhost...
     r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # or IP address (IPv4 format)
     r'(?::\d{2,5})?'  # optional port number
     r'(?:[/?#][^\s]*)?$',  # URL path or query parameters
-    IGNORECASE)
+    re.IGNORECASE)
 
 # Global tags allowed to be used for filtering in the trigger condition
 CLMC_INFORMATION_MODEL_GLOBAL_TAGS = {"flame_sfc", "flame_sfci", "flame_sfp", "flame_sf", "flame_sfe", "flame_server", "flame_location"}
@@ -84,7 +84,7 @@ ALERTS_SPECIFICATION_SCHEMA = Schema({
                             "metric": And(str, lambda s: len(s.split('.', 1)) == 2),
                             "condition": {
                                 "threshold": Or(int, float),
-                                "granularity": int,
+                                "granularity": And(int, lambda p: p > 0),
                                 Optional("aggregation_method"): And(str, lambda s: s in INFLUX_QL_FUNCTIONS),
                                 Optional("resource_type"): {
                                     And(str, lambda s: s in CLMC_INFORMATION_MODEL_GLOBAL_TAGS): str
@@ -120,10 +120,10 @@ def validate_clmc_alerts_specification(tosca_yaml_tpl, include_error=False):
     try:
         ALERTS_SPECIFICATION_SCHEMA.validate(tosca_yaml_tpl)
         valid, err = True, None
-    except SchemaError as e:
-        valid, err = False, e
+    except SchemaError as schema_error:
+        valid, err = False, schema_error
 
     if include_error:
         return valid, err
-    else:
-        return valid
+
+    return valid
diff --git a/src/service/clmcservice/alertsapi/tests.py b/src/service/clmcservice/alertsapi/tests.py
index 09e4d61c071f8b6c47f36a4d2af7761459241d68..c142707c06f7723b7597ea1d9a4898b95c12a4c7 100644
--- a/src/service/clmcservice/alertsapi/tests.py
+++ b/src/service/clmcservice/alertsapi/tests.py
@@ -26,7 +26,6 @@
 # Python standard libs
 from os import listdir
 from os.path import isfile, join, dirname
-from urllib.parse import urlparse
 
 # PIP installed libs
 import pytest
@@ -208,13 +207,14 @@ class TestAlertsConfigurationAPI(object):
             assert "triggers_action_errors" not in clmc_service_response, "Unexpected error was returned for handlers specification"
 
             # traverse through all alert IDs and check that they are created within Kapacitor
-            for alert_id in alert_ids:
+            for alert_id, alert_type in alert_ids:
                 kapacitor_response = get("http://localhost:9092/kapacitor/v1/tasks/{0}".format(alert_id))
                 assert kapacitor_response.status_code == 200, "Alert with ID {0} was not created - test file {1}.".format(alert_id, alerts_test_file)
                 kapacitor_response_json = kapacitor_response.json()
                 assert "link" in kapacitor_response_json, "Incorrect response from kapacitor for alert with ID {0} - test file {1}".format(alert_id, alerts_test_file)
                 assert kapacitor_response_json["status"] == "enabled", "Alert with ID {0} was created but is disabled - test file {1}".format(alert_id, alerts_test_file)
                 assert kapacitor_response_json["executing"], "Alert with ID {0} was created and is enabled, but is not executing - test file {1}".format(alert_id, alerts_test_file)
+                assert kapacitor_response_json["type"] == alert_type,  "Alert with ID {0} was created with the wrong type - test file {1}".format(alert_id, alerts_test_file)
 
             # check that all topic IDs were registered within Kapacitor
             topic_ids = list(topic_handlers.keys())
@@ -284,23 +284,45 @@ def extract_alert_spec_data(alert_spec):
         policy_id = policy.name
         for trigger in policy.triggers:
             trigger_id = trigger.name
+            event_type = trigger.trigger_tpl["event_type"]
+            alert_period_integer = trigger.trigger_tpl["condition"]["granularity"]
 
             topic_id = "{0}\n{1}\n{2}\n{3}".format(sfc, sfc_instance, policy_id, trigger_id)
             topic_id = AlertsConfigurationAPI.get_hash(topic_id)
             topic_handlers[topic_id] = []
 
             alert_id = topic_id
-            alert_ids.append(alert_id)
+            alert_type = get_alert_type(event_type, alert_period_integer)
+            alert_ids.append((alert_id, alert_type))
 
             for handler_url in trigger.trigger_tpl["action"]["implementation"]:
-                handler_host = urlparse(handler_url).hostname
-                handler_id = "{0}\n{1}\n{2}\n{3}\n{4}".format(sfc, sfc_instance, policy_id, trigger_id, handler_host)
+                handler_id = "{0}\n{1}\n{2}\n{3}\n{4}".format(sfc, sfc_instance, policy_id, trigger_id, handler_url)
                 handler_id = AlertsConfigurationAPI.get_hash(handler_id)
                 topic_handlers[topic_id].append((handler_id, handler_url))
 
     return sfc, sfc_instance, alert_ids, topic_handlers
 
 
+def get_alert_type(event_type, alert_period):
+    """
+    Retrieve the alert type (stream or batch) based on the event type and alert period.
+
+    :param event_type: event type, e.g. threshold, relative, deadman, etc.
+    :param alert_period: the alert period
+
+    :return: "batch" or "stream"
+    """
+
+    if event_type in AlertsConfigurationAPI.DUAL_VERSION_TEMPLATES:
+        if alert_period <= AlertsConfigurationAPI.STREAM_PERIOD_LIMIT:
+            return "stream"
+        else:
+            return "batch"
+    else:
+        events = {"relative": "batch", "deadman": "stream"}
+        return events[event_type]
+
+
 def clear_kapacitor_alerts(alert_ids, topic_handlers):
     """
     A utility function to clean up Kapacitor from the configured alerts, topics and handlers.
@@ -309,7 +331,7 @@ def clear_kapacitor_alerts(alert_ids, topic_handlers):
     :param topic_handlers: the dictionary of topic and handlers to delete
     """
 
-    for alert_id in alert_ids:
+    for alert_id, _ in alert_ids:
         kapacitor_response = delete("http://localhost:9092/kapacitor/v1/tasks/{0}".format(alert_id))  # delete alert
         assert kapacitor_response.status_code == 204
 
diff --git a/src/service/clmcservice/alertsapi/utilities.py b/src/service/clmcservice/alertsapi/utilities.py
index a2eec79beb2af43a3ec605656264da70bbbcbec9..eba22f2c3e631b8548ed4d3c79dcff591d302187 100644
--- a/src/service/clmcservice/alertsapi/utilities.py
+++ b/src/service/clmcservice/alertsapi/utilities.py
@@ -135,22 +135,6 @@ class TICKScriptTemplateFiller:
     A utility class used for TICK script templates filtering.
     """
 
-    # a class variable used to hold the comparison operator used to build the where clause in TICK script templates,
-    # these differ if the where clause is built as a string opposed to when it is build as a lambda
-    _TEMPLATE_COMPARISON_OPERATOR = {"threshold": "=", "relative": "=", "deadman": "=="}
-
-    @staticmethod
-    def get_comparison_operator(template_type):
-        """
-        Get the correct comparison operator depending on the template type, if template type not recognized, return "=="
-
-        :param template_type: one of the template types, that are created within kapacitor
-
-        :return: the comparison operator that should be used in the template to build the where clause
-        """
-
-        return TICKScriptTemplateFiller._TEMPLATE_COMPARISON_OPERATOR.get(template_type, "==")
-
     @staticmethod
     def fill_template_vars(template_type, **kwargs):
         """
@@ -168,8 +152,8 @@ class TICKScriptTemplateFiller:
         return fill_function(**kwargs)
 
     @staticmethod
-    def _fill_threshold_template_vars(db=None, measurement=None, field=None, influx_function=None, critical_value=None,
-                                      comparison_operator=None, alert_period=None, topic_id=None, where_clause=None, **kwargs):
+    def _fill_threshold_batch_template_vars(db=None, measurement=None, field=None, influx_function=None, critical_value=None,
+                                            comparison_operator=None, alert_period=None, topic_id=None, event_id=None, where_clause=None, **kwargs):
         """
         Creates a dictionary object ready to be posted to kapacitor to create a "threshold" task from template.
 
@@ -181,6 +165,7 @@ class TICKScriptTemplateFiller:
         :param comparison_operator: type of comparison
         :param alert_period: alert period to query influx
         :param topic_id: topic identifier
+        :param event_id: event identifier
         :param where_clause: (OPTIONAL) argument for filtering the influx query by tag values
 
         :return: a dictionary object ready to be posted to kapacitor to create a "threshold" task from template.
@@ -216,6 +201,10 @@ class TICKScriptTemplateFiller:
             "topicID": {
                 "type": "string",
                 "value": topic_id
+            },
+            "eventID": {
+                "type": "string",
+                "value": event_id
             }
         }
 
@@ -227,9 +216,63 @@ class TICKScriptTemplateFiller:
 
         return template_vars
 
+    @staticmethod
+    def _fill_threshold_stream_template_vars(db=None, measurement=None, field=None, critical_value=None,
+                                             comparison_operator=None, topic_id=None, event_id=None, where_clause=None, **kwargs):
+        """
+        Creates a dictionary object ready to be posted to kapacitor to create a "threshold" task from template.
+
+        :param db: db name
+        :param measurement: measurement name
+        :param field: field name
+        :param critical_value: critical value to compare with
+        :param comparison_operator: type of comparison
+        :param topic_id: topic identifier
+        :param event_id: event identifier
+        :param where_clause: (OPTIONAL) argument for filtering the influx query by tag values
+
+        Note: unlike the batch variant, no influx_function or alert_period is needed - each incoming point is checked as a stream.
+
+        :return: a dictionary object ready to be posted to kapacitor to create a "threshold" task from template.
+        """
+
+        comparison_lambda = '"{0}" {1} {2}'.format(field, comparison_operator, critical_value)  # build up lambda string, e.g. "real_value" >= 10
+
+        template_vars = {
+            "db": {
+                "type": "string",
+                "value": db
+            },
+            "measurement": {
+                "type": "string",
+                "value": measurement
+            },
+            "comparisonLambda": {
+                "type": "lambda",
+                "value": comparison_lambda
+            },
+            "topicID": {
+                "type": "string",
+                "value": topic_id
+            },
+            "eventID": {
+                "type": "string",
+                "value": event_id
+            }
+        }
+
+        if where_clause is not None:
+            where_clause = where_clause.replace("=", "==")  # stream templates use a lambda function, which requires "==" as comparison operator
+            template_vars["whereClause"] = {
+                "type": "lambda",
+                "value": where_clause
+            }
+
+        return template_vars
+
     @staticmethod
     def _fill_relative_template_vars(db=None, measurement=None, field=None, influx_function=None, critical_value=None, comparison_operator=None,
-                                     alert_period=None, topic_id=None, where_clause=None, **kwargs):
+                                     alert_period=None, topic_id=None, event_id=None, where_clause=None, **kwargs):
         """
         Creates a dictionary object ready to be posted to kapacitor to create a "relative" task from template.
 
@@ -241,6 +284,7 @@ class TICKScriptTemplateFiller:
         :param comparison_operator: type of comparison
         :param alert_period: alert period to use for relative comparison
         :param topic_id: topic identifier
+        :param event_id: event identifier
         :param where_clause: (OPTIONAL) argument for filtering the influx query by tag values
 
         :return: a dictionary object ready to be posted to kapacitor to create a "relative" task from template.
@@ -276,6 +320,10 @@ class TICKScriptTemplateFiller:
             "topicID": {
                 "type": "string",
                 "value": topic_id
+            },
+            "eventID": {
+                "type": "string",
+                "value": event_id
             }
         }
 
@@ -288,7 +336,7 @@ class TICKScriptTemplateFiller:
         return template_vars
 
     @staticmethod
-    def _fill_deadman_template_vars(db=None, measurement=None, critical_value=None, alert_period=None, topic_id=None, where_clause=None, **kwargs):
+    def _fill_deadman_template_vars(db=None, measurement=None, critical_value=None, alert_period=None, topic_id=None, event_id=None, where_clause=None, **kwargs):
         """
         Creates a dictionary object ready to be posted to kapacitor to create a "deadman" task from template.
 
@@ -297,6 +345,7 @@ class TICKScriptTemplateFiller:
         :param critical_value: critical value to compare with
         :param alert_period: alert period to use for relative comparison
         :param topic_id: topic identifier
+        :param event_id: event identifier
         :param where_clause: (OPTIONAL) argument for filtering the influx query by tag values
 
         :return: a dictionary object ready to be posted to kapacitor to create a "deadman" task from template.
@@ -322,10 +371,15 @@ class TICKScriptTemplateFiller:
             "topicID": {
                 "type": "string",
                 "value": topic_id
+            },
+            "eventID": {
+                "type": "string",
+                "value": event_id
             }
         }
 
         if where_clause is not None:
+            where_clause = where_clause.replace("=", "==")  # stream templates use a lambda function, which requires "==" as comparison operator
             template_vars["whereClause"] = {
                 "type": "lambda",
                 "value": where_clause
diff --git a/src/service/clmcservice/alertsapi/views.py b/src/service/clmcservice/alertsapi/views.py
index 34490678d30774d4fee6ea445257d59113b7633c..74ce6e928c2495243c64dec30906d4f942755fb7 100644
--- a/src/service/clmcservice/alertsapi/views.py
+++ b/src/service/clmcservice/alertsapi/views.py
@@ -24,7 +24,6 @@
 
 # Python standard libs
 import logging
-from urllib.parse import urlparse
 from hashlib import sha256
 
 # PIP installed libs
@@ -36,7 +35,7 @@ from requests import post
 
 # CLMC-service imports
 from clmcservice.alertsapi.utilities import adjust_tosca_definitions_import, TICKScriptTemplateFiller, fill_http_post_handler_vars, get_resource_spec_topic_ids, get_alert_spec_topic_ids
-from clmcservice.alertsapi.alerts_specification_schema import COMPARISON_OPERATORS,  validate_clmc_alerts_specification
+from clmcservice.alertsapi.alerts_specification_schema import COMPARISON_OPERATORS, validate_clmc_alerts_specification
 
 # initialise logger
 log = logging.getLogger('service_logger')
@@ -48,6 +47,10 @@ class AlertsConfigurationAPI(object):
     A class-based view for configuring alerts within CLMC.
     """
 
+    STREAM_PERIOD_LIMIT = 60  # if alert period is <= 60 seconds, then a stream template is used, otherwise use batch
+
+    DUAL_VERSION_TEMPLATES = {"threshold"}  # this set defines all template types that are written in two versions (stream and batch)
+
     def __init__(self, request):
         """
         Initialises the instance of the view with the request argument.
@@ -59,6 +62,9 @@ class AlertsConfigurationAPI(object):
 
     @view_config(route_name='alerts_configuration', request_method='GET')
     def get_alerts_hash(self):
+        """
+        Retrieves hash value for alerts task, topic and handlers based on sfc, sfci, policy and trigger IDs
+        """
 
         for param in ("sfc", "sfci", "policy", "trigger"):
             if param not in self.request.params:
@@ -211,7 +217,8 @@ class AlertsConfigurationAPI(object):
 
                 condition = trigger.trigger_tpl["condition"]
                 critical_value = float(condition["threshold"])
-                alert_period = "{0}s".format(condition["granularity"])
+                alert_period_integer = condition["granularity"]
+                alert_period = "{0}s".format(alert_period_integer)
                 influx_function = condition.get("aggregation_method", "mean")  # if not specified, use "mean"
 
                 # check for tag filtering
@@ -222,12 +229,9 @@ class AlertsConfigurationAPI(object):
                     tags["flame_sfc"] = sfc
                     tags["flame_sfci"] = sfc_instance
 
-                    # NOTE: if the template has its where clause defined as lambda (stream templates), then use "==" as comparison operator,
-                    #       else if the template's where clause is defined as a string (batch templates), then use "=" as comparison operator
-                    filter_comparison_operator = TICKScriptTemplateFiller.get_comparison_operator(event_type)  # retrieves the correct comparison operator to use for building the where clause
-
                     # build up the where clause from the tags dictionary
-                    where_clause = " AND ".join(map(lambda tag_name: '"{0}"{1}\'{2}\''.format(tag_name, filter_comparison_operator, tags[tag_name]), tags))
+                    where_filters_list = map(lambda tag_name: '"{0}"=\'{1}\''.format(tag_name, tags[tag_name]), tags)
+                    where_clause = " AND ".join(where_filters_list)
 
                 comparison_operator = COMPARISON_OPERATORS[condition.get("comparison_operator", "gte")]  # if not specified, use "gte" (>=)
 
@@ -236,11 +240,20 @@ class AlertsConfigurationAPI(object):
                 topic_id = self.get_hash(topic_id)
                 alert_id = topic_id
 
+                # check whether the template needs to be a stream or a batch
+                if event_type in self.DUAL_VERSION_TEMPLATES:
+                    if alert_period_integer <= self.STREAM_PERIOD_LIMIT:
+                        template_id = "{0}-stream-template".format(event_type)
+                        event_type = "{0}_stream".format(event_type)
+                    else:
+                        template_id = "{0}-batch-template".format(event_type)
+                        event_type = "{0}_batch".format(event_type)
+
                 # built up the template vars dictionary depending on the event type (threshold, relative, etc.)
                 # all extracted properties from the trigger are passed, the TICKScriptTemplateFiller entry point then forwards those to the appropriate function for template filling
                 template_vars = TICKScriptTemplateFiller.fill_template_vars(event_type, db=db, measurement=measurement, field=field, influx_function=influx_function,
                                                                             critical_value=critical_value, comparison_operator=comparison_operator, alert_period=alert_period,
-                                                                            topic_id=topic_id, where_clause=where_clause)
+                                                                            topic_id=topic_id, event_id=event_id, where_clause=where_clause)
 
                 # create and activate alert task through the kapacitor HTTP API
                 kapacitor_api_tasks_url = "http://{0}:{1}/kapacitor/v1/tasks".format(kapacitor_host, kapacitor_port)
@@ -289,8 +302,7 @@ class AlertsConfigurationAPI(object):
 
         kapacitor_api_handlers_url = "http://{0}:{1}/kapacitor/v1/alerts/topics/{2}/handlers".format(kapacitor_host, kapacitor_port, topic_id)
         for http_handler_url in http_handlers:
-            http_handler_host = urlparse(http_handler_url).hostname
-            handler_id = "{0}\n{1}\n{2}\n{3}\n{4}".format(sfc, sfc_i, policy_id, event_id, http_handler_host)
+            handler_id = "{0}\n{1}\n{2}\n{3}\n{4}".format(sfc, sfc_i, policy_id, event_id, http_handler_url)
             handler_id = self.get_hash(handler_id)
             kapacitor_http_request_body = fill_http_post_handler_vars(handler_id, http_handler_url)
             response = post(kapacitor_api_handlers_url, json=kapacitor_http_request_body)
diff --git a/src/service/resources/TICKscript/deadman-template.tick b/src/service/resources/TICKscript/deadman-template.tick
index 2392b7716a3ee60acbaaaef4e6319e13b35f1bf2..5762d4ad5505972b989f1561a36ede5e3bc099b7 100644
--- a/src/service/resources/TICKscript/deadman-template.tick
+++ b/src/service/resources/TICKscript/deadman-template.tick
@@ -14,6 +14,8 @@ var throughputThreshold float  // alerts will trigger if data points reported du
 
 var topicID string
 
+var eventID string  // topicID is based on the event ID, but represents a hash value
+
 
 stream
     | from()
@@ -22,7 +24,7 @@ stream
         .measurement(measurement)
         .where(whereClause)
     | deadman(throughputThreshold, alertPeriod)
-        .id(topicID)
+        .id(eventID)
         .details('db=' + db + ',measurement=' + measurement)
         .message(messageValue)
         .topic(topicID)
diff --git a/src/service/resources/TICKscript/relative-template.tick b/src/service/resources/TICKscript/relative-template.tick
index 2363ef4e2ddd284040e825af1433c2ee658e6ee1..30ac95c962a6bf547c93935810beb2d108a38b3f 100644
--- a/src/service/resources/TICKscript/relative-template.tick
+++ b/src/service/resources/TICKscript/relative-template.tick
@@ -18,6 +18,8 @@ var alertPeriod duration
 
 var topicID string
 
+var eventID string  // topicID is based on the event ID, but represents a hash value
+
 
 var current = batch
     |query('SELECT ' + influxFunction + '(' + field + ') AS value FROM "' + db + '"."' + rp + '"."' + measurement + '" WHERE ' + whereClause)
@@ -39,7 +41,7 @@ past
     | eval(lambda: float("current.value" - "past.value"))
         .as('diff')
     | alert()
-        .id(topicID)
+        .id(eventID)
         .details('db=' + db + ',measurement=' + measurement)
         .crit(comparisonLambda)
         .message(messageValue)
diff --git a/src/service/resources/TICKscript/threshold-template.tick b/src/service/resources/TICKscript/threshold-batch-template.tick
similarity index 89%
rename from src/service/resources/TICKscript/threshold-template.tick
rename to src/service/resources/TICKscript/threshold-batch-template.tick
index 5518814f5a4c652fdf9c6b70496de486261a5678..379a49c4fffb030f8f1775c658c1b25cf8f64d2e 100644
--- a/src/service/resources/TICKscript/threshold-template.tick
+++ b/src/service/resources/TICKscript/threshold-batch-template.tick
@@ -18,13 +18,14 @@ var alertPeriod duration
 
 var topicID string
 
+var eventID string  // topicID is based on the event ID, but represents a hash value
 
 batch
     |query('SELECT ' + influxFunction + '(' + field + ') AS real_value FROM "' + db + '"."' + rp + '"."' + measurement + '" WHERE ' + whereClause)
         .period(alertPeriod)
         .every(alertPeriod)
     |alert()
-        .id(topicID)
+        .id(eventID)
         .details('db=' + db + ',measurement=' + measurement)
         .crit(comparisonLambda)
         .message(messageValue)
diff --git a/src/service/resources/TICKscript/threshold-stream-template.tick b/src/service/resources/TICKscript/threshold-stream-template.tick
new file mode 100644
index 0000000000000000000000000000000000000000..6ee92fd2c03b7d68efa871bff02dc53b29f1c1fd
--- /dev/null
+++ b/src/service/resources/TICKscript/threshold-stream-template.tick
@@ -0,0 +1,30 @@
+var db string  // database per service function chain, so db is named after sfc
+
+var rp = 'autogen'  // default value for the retention policy
+
+var measurement string
+
+var whereClause = lambda: TRUE  // default value is a function which returns TRUE, hence no filtering of the query result
+
+var messageValue = 'TRUE'  // default value is TRUE, as this is what SFEMC expects as a notification for an event rule
+
+var comparisonLambda lambda  // comparison function e.g. "real_value" > 40
+
+var topicID string
+
+var eventID string  // topicID is based on the event ID, but represents a hash value
+
+
+stream
+    | from()
+        .database(db)
+        .retentionPolicy(rp)
+        .measurement(measurement)
+        .where(whereClause)
+    | alert()
+        .id(eventID)
+        .details('db=' + db + ',measurement=' + measurement)
+        .crit(comparisonLambda)
+        .message(messageValue)
+        .topic(topicID)
+        .noRecoveries()
diff --git a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-1.yaml b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-1.yaml
index eba786b9bed5f58890851a95b76f8bba7fc19d7e..0114a3546eaadda10e54ab0e051f67eebe303b40 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-1.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-1.yaml
@@ -21,11 +21,11 @@ topology_template:
             metric: network.latency
             condition:
               threshold: 45
-              granularity: 120
+              granularity: 30
               aggregation_method: mean
               resource_type:
                 flame_location: watershed
-              comparison_operator: gt
+              comparison_operator: eq
             action:
               implementation:
                 - http://sfemc.flame.eu/notify
diff --git a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-4.yaml b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-4.yaml
index 87e31406e07c0b33767108e9c9de2091a160ef79..13808f97ec173a5e1187d1a2551875b60e74208d 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-4.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-4.yaml
@@ -21,12 +21,12 @@ topology_template:
             metric: network.latency
             condition:
               threshold: 45
-              granularity: 120
+              granularity: 45
               aggregation_method: median
               resource_type:
                 flame_location: watershed
                 flame_server: watershed
-              comparison_operator: gt
+              comparison_operator: neq
             action:
               implementation:
                 - http://sfemc.flame.eu/notify
@@ -43,7 +43,7 @@ topology_template:
               granularity: 60
               aggregation_method: first
               # resource type missing - optional, so it is valid
-              comparison_operator: lt
+              comparison_operator: lte
             action:
               implementation:
                 - http://sfemc.flame.eu/notify
diff --git a/src/test/clmctest/alerts/alerts_test_config.yaml b/src/test/clmctest/alerts/alerts_test_config.yaml
index c2e9332e7a5783966223f3debd9bf7418866491c..fe293a0308352acb4f694e208708db7c1d3d06fa 100644
--- a/src/test/clmctest/alerts/alerts_test_config.yaml
+++ b/src/test/clmctest/alerts/alerts_test_config.yaml
@@ -31,18 +31,18 @@ topology_template:
             action:
               implementation:
                 - http://172.40.231.200:9999/
-          high_cpu_usage:
-            description: This event triggers when the cpu system usage is too high.
+          increase_in_running_processes:
+            description: This event triggers when the max number of running processes increases.
             event_type: threshold
-            metric: cpu.usage_system
+            metric: processes.running
             condition:
-              threshold: 10
+              threshold: 0
               granularity: 10
-              aggregation_method: mean
+              aggregation_method: max
               resource_type:
                 flame_location: DC1
                 flame_sfp: nginx
-              comparison_operator: lte
+              comparison_operator: gte
             action:
               implementation:
                 - http://172.40.231.200:9999/
diff --git a/src/test/clmctest/alerts/conftest.py b/src/test/clmctest/alerts/conftest.py
index 73a65b726d0f6af32e18f71d21b90d30e4189b77..02d004df047f89dbbe44c2fd3e3d32830a14b1ea 100644
--- a/src/test/clmctest/alerts/conftest.py
+++ b/src/test/clmctest/alerts/conftest.py
@@ -54,7 +54,7 @@ def rspec_config():
     return data_loaded
 
 
-@fixture(autouse=True, scope="module")
+@fixture(scope="module")
 def set_up_tear_down_fixture(rspec_config):
     """
     Set up/tear down fixture for the alerts integration test.
diff --git a/src/test/clmctest/alerts/resources_test_config.yaml b/src/test/clmctest/alerts/resources_test_config.yaml
index 45591f166d254eab8840a276a3897d6210f0cf1b..061c227317c1e4f6818e34b173b1f42eb8eb2907 100644
--- a/src/test/clmctest/alerts/resources_test_config.yaml
+++ b/src/test/clmctest/alerts/resources_test_config.yaml
@@ -85,7 +85,7 @@ topology_template:
                     Bristol: eu.ict-flame.sfe.state.lifecycle.connected
           tigger_b:
             condition:
-              constraint: clmc::high_cpu_usage
+              constraint: clmc::increase_in_running_processes
               period: 600 # integer required, unit: seconds
             action:
               frontend:
diff --git a/src/test/clmctest/alerts/test_alerts.py b/src/test/clmctest/alerts/test_alerts.py
index e5535f459f117cbcd5d2b7cc5effc522c6c07360..59fefb48eb0f7f5e3a88f13c5ba4f06943924f51 100644
--- a/src/test/clmctest/alerts/test_alerts.py
+++ b/src/test/clmctest/alerts/test_alerts.py
@@ -35,7 +35,7 @@ NGINX_PORT = 80
 
 class TestAlerts(object):
 
-    def test_alert_triggers(self, rspec_config):
+    def test_alert_triggers(self, rspec_config, set_up_tear_down_fixture):
         """
         Test is implemented using the following steps:
             * Send clmc service a TOSCA alert spec. file