diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index e2a7098b150f40a07e3a581e4da07665cb624dd8..f72b18dd37ee3f7e676af87c07093d26ad26b241 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -35,8 +35,8 @@ build:tests:
     - python setup.py sdist --dist-dir=$CI_PROJECT_DIR/build
   artifacts:
     paths:
-    - build/clmctest-2.4.0.tar.gz
-    - build/clmcservice-2.4.0.tar.gz
+    - build/clmctest-2.4.1.tar.gz
+    - build/clmcservice-2.4.1.tar.gz
     expire_in: 1 day
 
 test:all:
@@ -50,8 +50,8 @@ test:all:
     - echo "REPO_PASS=${REPO_PASS}" >> $CI_PROJECT_DIR/reporc
     - sudo scripts/test/fixture.sh create -f src/test/clmctest/rspec.json -r $CI_PROJECT_DIR -c all
     - sudo mkdir /var/lib/lxd/containers/test-runner/rootfs/opt/clmc/build
-    - sudo cp build/clmctest-2.4.0.tar.gz /var/lib/lxd/containers/test-runner/rootfs/opt/clmc/build
-    - sudo lxc exec test-runner -- pip3 install /opt/clmc/build/clmctest-2.4.0.tar.gz
+    - sudo cp build/clmctest-2.4.1.tar.gz /var/lib/lxd/containers/test-runner/rootfs/opt/clmc/build
+    - sudo lxc exec test-runner -- pip3 install /opt/clmc/build/clmctest-2.4.1.tar.gz
     - sudo lxc exec test-runner -- pytest -s --tb=short -rfp --pyargs clmctest
   when: on_success      
   
diff --git a/docs/AlertsSpecification.md b/docs/AlertsSpecification.md
index 2eae1cc049929da24c7a04f76308ca6ea353a37e..19fcaa971ba926e254258fa2433cfbd1e776a47e 100644
--- a/docs/AlertsSpecification.md
+++ b/docs/AlertsSpecification.md
@@ -62,8 +62,10 @@ topology_template:
     - high_latency_policy:
         type: eu.ict-flame.policies.Alert
         triggers:
-          high_latency:
+          high_latency_batch:
             description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
+            metadata:
+              monitoring_type: batch  # the latency is monitored in batch mode; therefore, aggregation method and granularity ARE required
             event_type: threshold
             metric: network.latency
             condition:
@@ -77,7 +79,21 @@ topology_template:
               implementation:
                 - flame_sfemc
                 - http://companyA.alert-handler.flame.eu/high-latency
-                
+          high_latency_stream:
+            description: This event triggers when the network latency in a given location exceeds a given threshold (in ms).
+            metadata:
+              monitoring_type: stream  # the latency is monitored in stream mode; therefore, aggregation method and granularity ARE NOT required
+            event_type: threshold
+            metric: network.latency
+            condition:
+              threshold: 45
+              resource_type:
+                flame_location: watershed
+              comparison_operator: gt
+            action:
+              implementation:
+                - flame_sfemc
+                - http://companyA.alert-handler.flame.eu/high-latency
     - low_requests_policy:
         type: eu.ict-flame.policies.Alert
         triggers:
@@ -85,6 +101,8 @@ topology_template:
             description: |
               This event triggers when the last reported number of requests for a given service function
               falls behind a given threshold.
+            metadata:
+              monitoring_type: batch
             event_type: threshold
             metric: storage.requests
             condition:
@@ -183,6 +201,8 @@ topology_template:
             triggers:
                 <event identifier>:
                   description: <optional description for the given event trigger>
+                  metadata:  # semantics depend on the event type; the deadman alert type doesn't require a metadata section
+                    <metadata key>: <metadata value>
                   event_type: <threshold | relative | deadman>
                   metric: <measurement>.<field>
                   condition:
@@ -211,6 +231,11 @@ topology_template:
 
 * **event_type** - the type of TICK Script template to use to create the alert - more information will be provided about the different options here, but we assume the most common one will be **threshold**. Currently, the other supported types are **relative** and **deadman**. These are also the main Kapacitor tasks that can be created through Chronograf. 
 
+* **metadata** - any metadata specific to the event type -
+    * for **threshold** event type, the metadata must contain a field called *monitoring_type* with a *stream* or *batch* value defining the type of monitoring to perform; see details in the relevant section below
+    * for **relative** event type, the metadata must contain a field called *percentage_evaluation* with a *true* or *false* value defining how to compute the difference (raw difference or percentage difference) between the current and the past metric value; see details in the relevant section below
+    * for **deadman** event type, metadata is neither required nor expected; passing a metadata section for this event type will fail validation
+
 * **metric** - the metric to query in InfluxDB, must include measurement name and field name in format `<measurement>`.`<field>`. The only exception is when a **deadman** event type is used - then the `<field>`is not used, but the format is still the same for consistency. Therefore, using `<measurement>.*` will be sufficient.
 
 * **threshold** -
@@ -219,11 +244,11 @@ topology_template:
     * for **deadman** event type, this is the critical value the number of measurement points (received in InfluxDB) is compared to.
 
 * **granularity** - period in seconds
-    * for **threshold** event type, this value specifies how often should Kapacitor query InfluxDB to check whether the alert condition is true.
+    * for **threshold** event type, this value specifies how often Kapacitor should query InfluxDB to check whether the alert condition is true. This is required only when the monitoring type is set to *batch*; when using *stream* monitoring, granularity must not be specified (every measurement point is monitored)
     * for **relative** event type, this value specifies how long back in time to compare the current metric value with
     * for **deadman** event type, this value specifies how long the span in time (in which the number of measurement points are checked) is 
 
-* **aggregation_method** - the aggregation function to use when querying InfluxDB in batch mode, e.g. median, mean, etc. This value is only used when the event_type is set to **threshold** or **relative**.
+* **aggregation_method** - the aggregation function to use when querying InfluxDB in batch mode, e.g. median, mean, etc. This value is only used when the event type is set to **threshold** (and monitoring type is set to batch) or **relative**.
 
     The currently included InfluxQL functions are:
     
@@ -251,12 +276,14 @@ topology_template:
 
 ##### Event types
 
-* **threshold** - A threshold event type is an alert in which Kapacitor queries InfluxDB for a specific metric in a given period of time by using a query function such as *mean*, *median*, *mode*, etc. If the granularity is less than or equal to 60 seconds, then every measurement point is monitored (improving performance), thus, ignoring the aggregation function. This value is then compared against a given threshold. If the result of the comparison operation is true, an alert is triggered. For example:
+* **threshold** - A threshold event type is an alert in which Kapacitor queries InfluxDB for a specific metric over a given period of time using a query function such as *mean*, *median*, *mode*, etc. The resulting value is then compared against a given threshold. If the result of the comparison operation is true, an alert is triggered. For example:
 
     ```yaml
     high_latency:
         description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
         event_type: threshold
+        metadata:
+          monitoring_type: batch
         metric: network.latency
         condition:
           threshold: 45
@@ -272,6 +299,26 @@ topology_template:
     ``` 
     
     This trigger specification will create an alert task in Kapacitor, which queries the **latency** field in the **network** measurement for location **watershed** every **120** seconds and compares the mean value for the last 120 seconds with the threshold value **45**. If the mean latency exceeds 45 (**gt** operator is used, which stands for **greater than**), an alert is triggered. This alert will be sent through an HTTP POST message to the URLs listed in the **implementation** section.
+    
+    An alternative to the alert above is to use *stream* monitoring, which means that every measurement point is monitored rather than querying InfluxDB at a given interval. Therefore, when using stream monitoring, the granularity and aggregation method are not required. For example:
+    
+    ```yaml
+    high_latency:
+        description: This event triggers when the network latency in a given location exceeds a given threshold (in ms).
+        event_type: threshold
+        metadata:
+          monitoring_type: stream
+        metric: network.latency
+        condition:
+          threshold: 45
+          resource_type:
+            flame_location: watershed
+          comparison_operator: gt
+        action:
+          implementation:
+            - flame_sfemc
+            - http://companyA.alert-handler.flame.eu/high-latency
+    ``` 
 
 * **relative** - A relative event type is an alert in which Kapacitor computes the difference between the current aggregated value of a metric and the aggregated value reported a given period of time ago. The difference between the current and the past value (could be raw difference, i.e. `current - past`, or percentage difference, i.e. `100 * (current - past) / past`) is then compared against a given threshold. If the result of the comparison operation is true, an alert is triggered. For example:
 
diff --git a/docs/clmc-development-guide.md b/docs/clmc-development-guide.md
index cf8b7e95ad3399dcaa07fb9f2dfee78303fb8c2c..b2e2199b3196de8e59b23fff2793b363542c6998 100644
--- a/docs/clmc-development-guide.md
+++ b/docs/clmc-development-guide.md
@@ -207,10 +207,19 @@ The source code is organised in various python subpackages. Each subpackage is t
 
 * **src/service/clmcservice/alertsapi** - the source code of the CLMC alerts API used for managing alerts and trigger notifications
 
-* **src/service/clmcservice/graphapi** - the source code of the CLMC graph API used for calculating round-trip time and performing graph-based measurements
+    * **src/service/clmcservice/alertsapi/alerts_specification_schema.py** - defines the validation schema for alerts specification documents
+    * **src/service/clmcservice/alertsapi/views.py** - defines the API functions of the alerts API; endpoints are named with identifiers which are then mapped to URLs
+    * **src/service/clmcservice/alertsapi/utilities.py** - utility functions used to fill Kapacitor templates (with data extracted from the alerts specification document)
 
+* **src/service/clmcservice/graphapi** - the source code of the CLMC graph API used for calculating round-trip time and performing graph-based measurements
+    
+    * **src/service/clmcservice/graphapi/views.py** - defines the API functions of the graph API; endpoints are named with identifiers which are then mapped to URLs
+    * **src/service/clmcservice/graphapi/utilities.py** - utility functions used for interacting with the Neo4j graph
+    
 * **src/service/clmcservice/managementapi** - the source code of the CLMC data management API used for managing (e.g. deleting) the data of a service function chain 
 
+    * **src/service/clmcservice/managementapi/views.py** - defines the API functions of the data management API; endpoints are named with identifiers which are then mapped to URLs
+
 * **src/service/clmcservice/models** - package for any persistency related code (e.g. object relational mappings)
 
 * **src/service/clmcservice/static** - static files that are required by the CLMC service, e.g. the TOSCA alerts definitions file
@@ -365,3 +374,139 @@ All tests are implemented using *Python* and *Pytest*. The integration tests (lo
 ```bash
 lxc exec test-runner -- pytest -s --tb=short -rfp --pyargs /opt/clmc/src/test/clmctest
 ```
+
+#### Implementation details of the Graph API
+
+The Graph API includes a number of interfaces for managing a graph representing a given service function chain, a.k.a. a media service. This includes endpoints for creating and deleting various parts of the graph, as well as running data queries, e.g. a round-trip time query. Ultimately, we use these to perform graph-based monitoring, which includes the following steps:
+
+1) convert from time-series data to graph nodes and relationships
+2) build a temporal graph, that is a graph which represents the status of the media service in a given time range
+3) query the temporal graph for round-trip time measurements
+4) save the new measurements into InfluxDB
+5) delete the temporal graph
+6) repeat after a user-defined time period, e.g. 30 seconds
+
+All of this is automated by the graph pipeline bash script located at **scripts/clmc-service/graph-pipeline.sh**. Whenever graph-based monitoring is requested through the CLMC service API, this script is run in the background as a child process (the CLMC application server is the parent process). The graph API allows a certain level of control over this process - start, stop and fetch status.
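+
+As a rough illustration only, the cycle automated by **graph-pipeline.sh** could be sketched in Python as below. The endpoint paths, payloads and response fields in this sketch are hypothetical placeholders standing in for the real graph API calls made by the script, not the actual routes - consult the graph API documentation for those.
+
+```python
+# Illustrative sketch of the graph monitoring cycle automated by graph-pipeline.sh.
+# The endpoint paths, payloads and response fields are hypothetical placeholders,
+# NOT the real graph API routes - see the graph API documentation for the real ones.
+import time
+import requests
+
+CLMC = "http://localhost:9000/clmc/clmc-service"
+QUERY_PERIOD = 30  # user-defined period in seconds
+
+
+def monitoring_cycle():
+    # steps 1-2: convert the time-series data and build a temporal graph for the last QUERY_PERIOD seconds
+    response = requests.post(CLMC + "/graph/temporal", json={"duration": QUERY_PERIOD})
+    graph_id = response.json()["graph_id"]  # hypothetical response field
+
+    # step 3: query the temporal graph for round-trip time measurements
+    rtt = requests.get(CLMC + "/graph/temporal/{0}/round-trip-time".format(graph_id)).json()
+
+    # step 4: the service persists the new measurements into InfluxDB
+    #         (written to the configured results measurement, e.g. graph_measurements)
+
+    # step 5: delete the temporal graph
+    requests.delete(CLMC + "/graph/temporal/{0}".format(graph_id))
+    return rtt
+
+
+if __name__ == "__main__":
+    while True:
+        monitoring_cycle()
+        time.sleep(QUERY_PERIOD)  # step 6: repeat after the user-defined period
+```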
+
+In the current implementation, we still have two manual steps to perform after each platform deployment:
+
+* Edit the **src/service/resources/GraphAPI/network_clusters.json** file (on the CLMC container) which maps the IP address of a service router to the host name of the cluster that is connected to this service router
+* Edit the **src/service/resources/GraphAPI/network_ues.json** file (on the CLMC container) which maps the IP address of a service router to the host name of a user equipment that is connected to this service router
+
+The information for these files can be retrieved by looking into the platform infrastructure slice, e.g. on the sandpit run `openstack server list`. There, we can see that cluster 20-sr1-cluster1-cluster is connected through 20-sr1-cluster1-sr. Hence, in the **network_clusters.json** file we need an entry like the following:
+
+```json
+"<ip of 20-sr1-cluster1-sr>": "20-sr1-cluster1-cluster"
+```
+
+Similarly, for the emulated UEs: ue20 is connected through 20-sr2-sr. Hence, in the **network_ues.json** file we need an entry like the following:
+
+```json
+"<ip of 20-sr2-sr>": "ue20"
+```
+
+The IP addresses mentioned above must be the IP addresses the service routers receive on the SDN controller network (e.g. flame-sdnctrl).
+
+Afterwards, the platform operator must build the network topology graph by either:
+
+* sending a POST request to the graph API - `curl -X POST http://<clmc-host>/clmc-service/graph/network`
+* running a bash script (on the CLMC container) that continuously rebuilds the network topology, thus updating latency measurements received from the SDN controller - `graph-network-topology.sh &`
+
+Requesting graph-based monitoring then is as simple as sending a JSON configuration to the CLMC service API. See the API documentation for more details on this.
+
+A simple example of how to manage the network topology graph on the sandpit after a platform re-deployment (including the manual steps):
+
+1)	ssh into the flame-provisioner
+2)	run `openstack server list`
+3)	record the mappings between clusters/UEs and IP address (on the SDN controller network, flame-sdnctrl) of the service router that connects them to the platform
+- example with cluster 20-sr1-cluster1-cluster: it is connected through service router 20-sr1-cluster1-sr, which has IP address 172.20.231.22; therefore, the mapping here is 172.20.231.22: 20-sr1-cluster1-cluster
+- example with ue20: it is connected through service router 20-sr2-sr, which has IP address 172.20.231.5; therefore, the mapping here is 172.20.231.5: ue20
+4)	ssh into the clmc container using its IP address on the flame-mgmt network, `ssh 172.10.231.17`
+5) edit the file with the recorded cluster mappings - `vim /opt/clmc/src/service/resources/GraphAPI/network_clusters.json` with the following
+
+    ```json
+    {
+      "172.20.231.22": "20-sr1-cluster1-cluster",
+      "172.20.231.20": "19-sr1-cluster1-cluster",
+      "172.20.231.16": "18-sr1-cluster1-cluster",
+      "172.20.231.17": "17-sr1-cluster1-cluster"
+    }
+    ```
+
+6) edit the file with the recorded ue mappings - `vim /opt/clmc/src/service/resources/GraphAPI/network_ues.json` with the following
+
+    ```json
+    {
+      "172.20.231.5": "ue20",
+      "172.20.231.14": "ue19",
+      "172.20.231.19": "ue18",
+      "172.20.231.23": "ue17"
+    }
+    ```
+
+7) run an automated bash script in the background to continuously update the network topology graph - `graph-network-topology.sh &`
+
+Now, to view the actual graph we need the Neo4j browser, which can be accessed through http://localhost:9001/clmc/neo4j/browser (assuming that the sandpit SSH tunnels have all been set up in the standard way documented in the flame experimenter docs).
+
+Since the Neo4j browser uses an additional port (7687, the Bolt protocol port), we need a second tunnel to forward requests from our browser to the sandpit frontend server and then to the CLMC. This is achieved through:
+
+`ssh -nNT -L 7687:platform:7687 sandpit &` - the same command as the one used for accessing the platform, the only difference being the port numbers
+
+Then, when opening the URL above, the Neo4j browser will ask for credentials:
+
+* Host: localhost (the port forwarding will forward port 7687 on localhost to port 7687 on the CLMC)
+* Username: neo4j
+* Password: admin
+
+Then we can execute Cypher queries against the Neo4j graph, the most useful one being `MATCH (n) RETURN n;`, which renders the full graph.
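+
+The same query can also be run programmatically. A minimal sketch with the official `neo4j` Python driver is shown below (the driver choice is an assumption for illustration - the CLMC code itself may use a different client); it relies on the forwarded Bolt port and the credentials listed above:
+
+```python
+# Run the Cypher query above through the forwarded Bolt port (7687), using the
+# credentials listed above. The neo4j driver choice is an illustration only.
+from neo4j import GraphDatabase
+
+driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "admin"))
+with driver.session() as session:
+    for record in session.run("MATCH (n) RETURN n"):
+        print(record["n"])
+driver.close()
+```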
+
+
+After the network topology graph is built, a media service provider can request graph-based monitoring. An example of how to request this is shown below, assuming that the SFC **sandpit-experiment** has been deployed (TOSCA files and LXD image are included in the sandpit repository, in the itinnov folder):
+
+JSON configuration:
+
+```json
+{
+  "query_period": 30,
+  "results_measurement_name": "graph_measurements",
+  "service_function_chain": "sandpit-experiment",
+  "service_function_chain_instance": "sandpit-experiment_1",
+  "service_functions": {
+    "sandstorage": {
+      "response_time_field": "(max(processing_time) - min(processing_time)) / ((max(request_count) -min(request_count))*1000)",
+      "request_size_field": "(max(bytes_received) - min(bytes_received)) / (max(request_count) - min(request_count))",
+      "response_size_field": "(max(bytes_sent) - min(bytes_sent)) / (max(request_count) - min(request_count))",
+      "measurement_name": "tomcat_connector"
+    }
+  }
+}
+```
+
+Then, in Chronograf we can view a measurement called **graph_measurements**, which will be populated by the CLMC graph monitoring pipeline.
+
+curl POST request:
+
+`curl -X POST http://localhost:9000/clmc/clmc-service/graph/monitor -d '{"query_period": 30, "results_measurement_name": "graph_measurements", "service_function_chain": "sandpit-experiment", "service_function_chain_instance": "sandpit-experiment_1", "service_functions": {"sandstorage": {"response_time_field": "(max(processing_time) - min(processing_time)) / ((max(request_count) -min(request_count))*1000)", "request_size_field": "(max(bytes_received) - min(bytes_received)) / (max(request_count) - min(request_count))", "response_size_field": "(max(bytes_sent) - min(bytes_sent)) / (max(request_count) - min(request_count))", "measurement_name": "tomcat_connector"}}}'`
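+
+To verify programmatically that the results measurement is being populated, a minimal sketch using the `influxdb` Python client could look like the following (the client choice is an assumption for illustration, and the database name is a placeholder that must be replaced with the database the SFC data is written to):
+
+```python
+# Minimal check that the graph monitoring pipeline is writing results into InfluxDB.
+# The database name is a placeholder - replace it with the database of the deployed SFC.
+from influxdb import InfluxDBClient
+
+client = InfluxDBClient(host="localhost", port=8086, database="<sfc database>")
+result = client.query('SELECT * FROM "graph_measurements" WHERE time > now() - 5m')
+for point in result.get_points():
+    print(point)
+```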
+
+#### Implementation details of the Alerts API
+
+The Alerts API is implemented on top of the Kapacitor HTTP API and includes the following main parts:
+
+* Wrapper interfaces for the Kapacitor HTTP API
+* Parsing and validation of a TOSCA-compliant CLMC-specific alerts configuration document
+* Kapacitor task templates
+
+When creating/updating alerts, the following steps are performed (in the specified order):
+
+1) the TOSCA alerts configuration document is parsed as a YAML file
+2) the YAML content is parsed as a TOSCA document
+3) the TOSCA document is validated against the CLMC alerts specification schema
+4) if a TOSCA resource specification document was passed too, it is parsed as a TOSCA document and both documents are compared for inconsistencies
+5) each policy and trigger in the alerts configuration document is traversed and its data extracted
+6) the extracted data from each trigger is converted to a Kapacitor task based on the chosen event type (which determines the Kapacitor task template to use)
+7) the Kapacitor task identifier is generated based on the sfc, sfc instance, policy and trigger identifiers and represents a hash of the concatenation of these identifiers (illustrated in the sketch after this list)
+8) the Kapacitor topic is named after the Kapacitor task (they share the same identifier)
+9) all URLs are registered as Kapacitor HTTP handlers for the created Kapacitor topic
+10) the Kapacitor handler identifier is generated as a hash of the concatenated sfc, sfc instance, policy and trigger identifiers with the handler URL
+11) the Kapacitor templates are filled in and forwarded to the Kapacitor HTTP API
+12) a response is returned which contains the errors (if any) encountered while interacting with Kapacitor (e.g. if an alert already exists)
+
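+The identifier generation in steps 7, 8 and 10 can be illustrated with the following sketch. The concatenation format for the task/topic identifier matches the one used in the test suite; the hash function and the separator used when appending the handler URL are assumptions for illustration (see `AlertsConfigurationAPI.get_hash` for the actual implementation):
+
+```python
+# Sketch of the identifier generation in steps 7, 8 and 10.
+# Assumptions: SHA-256 as the hash function and "\n" as the separator when
+# appending the handler URL - AlertsConfigurationAPI.get_hash defines the real one.
+from hashlib import sha256
+
+
+def get_hash(value):
+    return sha256(value.encode("utf-8")).hexdigest()
+
+
+sfc, sfc_instance = "companyA-VR", "companyA-VR-premium"
+policy_id, trigger_id = "high_latency_policy", "high_latency_batch"
+handler_url = "http://companyA.alert-handler.flame.eu/high-latency"
+
+# steps 7 and 8 - the Kapacitor task and its topic share the same identifier
+task_id = topic_id = get_hash("{0}\n{1}\n{2}\n{3}".format(sfc, sfc_instance, policy_id, trigger_id))
+
+# step 10 - each HTTP handler gets its own identifier, which also includes the handler URL
+handler_id = get_hash("{0}\n{1}\n{2}\n{3}\n{4}".format(sfc, sfc_instance, policy_id, trigger_id, handler_url))
+
+print(task_id, handler_id)
+```
+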
+Something that is not mentioned in the steps above is the generation of the SFEMC handler URL, which happens every time the alerts configuration document references **flame_sfemc** as an alert handler. SFEMC expects a POST request for a given alert to be sent to a URL in the following format:
+
+`http://<sfemc fqdn>:<sfemc port number>/<sfc identifier>/<policy identifier>/<trigger identifier>`
+
+However, policy and trigger identifiers are the identifiers included in the TOSCA resource specification document, since this is the only deployment information that the SFEMC knows (SFEMC doesn't know anything about the CLMC alert management). This is why we have the restriction that **policy identifiers that reference flame_sfemc from the alerts configuration document must match StateChange policy identifiers from the resource specification document**. Thus, we ensure that the policy identifier in the generated URL is what SFEMC expects. For the trigger identifiers, we ensure that **a trigger name that references flame_sfemc in the alerts configuration must match an identifier from a `clmc::<trigger name>` constraint in the resource specification document**. Thus, the SFEMC URL is generated consistently with what SFEMC expects as a request URL. The SFC identifier is extracted from the metadata section of the alerts configuration.
\ No newline at end of file
diff --git a/src/service/VERSION b/src/service/VERSION
index faf16644b2ebd4f9c9013ded49ba054d5fe10a28..35237d8a2fef63869780db5f0c2a9e7f27023e15 100644
--- a/src/service/VERSION
+++ b/src/service/VERSION
@@ -1 +1 @@
-__version__ = "2.4.0"
\ No newline at end of file
+__version__ = "2.4.1"
\ No newline at end of file
diff --git a/src/service/clmcservice/alertsapi/alerts_specification_schema.py b/src/service/clmcservice/alertsapi/alerts_specification_schema.py
index 7b48d43e0aabe10ab1d8b8b3dc52895221b309d9..50097ed6bcb765ec2e7a88c88c1459ca3c1fc0af 100644
--- a/src/service/clmcservice/alertsapi/alerts_specification_schema.py
+++ b/src/service/clmcservice/alertsapi/alerts_specification_schema.py
@@ -42,6 +42,8 @@ from schema import Schema, And, Or, Optional, SchemaError
 
 SFEMC = "flame_sfemc"  # describes the keyword used for the SFEMC alert handler
 
+STREAM_TYPE = "stream"
+BATCH_TYPE = "batch"
 
 # Influx QL functions defined in the documentation https://docs.influxdata.com/influxdb/v1.6/query_language/functions/
 INFLUX_QL_FUNCTIONS = (
@@ -80,14 +82,34 @@ HANDLERS = {
         ]
 }
 
-THRESHOLD_TRIGGER = {
+THRESHOLD_STREAM_TRIGGER = {
     Optional("description"): str,
+    "metadata": {
+        "monitoring_type": STREAM_TYPE
+    },
+    "event_type": "threshold",
+    "metric": And(str, lambda s: len(s.split('.', 1)) == 2),
+    "condition": {
+        "threshold": Or(int, float),
+        Optional("resource_type"): {
+            And(str, lambda tag: tag not in INVALID_TAGS): str
+        },
+        "comparison_operator": And(str, lambda s: s in COMPARISON_OPERATORS)
+    },
+    "action": HANDLERS
+}
+
+THRESHOLD_BATCH_TRIGGER = {
+    Optional("description"): str,
+    "metadata": {
+        "monitoring_type": BATCH_TYPE
+    },
     "event_type": "threshold",
     "metric": And(str, lambda s: len(s.split('.', 1)) == 2),
     "condition": {
         "threshold": Or(int, float),
         "granularity": And(int, lambda p: p > 0),
-        Optional("aggregation_method"): And(str, lambda s: s in INFLUX_QL_FUNCTIONS),  # defaults to "mean"
+        "aggregation_method": And(str, lambda s: s in INFLUX_QL_FUNCTIONS),
         Optional("resource_type"): {
             And(str, lambda tag: tag not in INVALID_TAGS): str
         },
@@ -143,7 +165,7 @@ ALERTS_SPECIFICATION_SCHEMA = Schema({
                 str: {
                     "type": "eu.ict-flame.policies.Alert",
                     "triggers": And({
-                        str: Or(THRESHOLD_TRIGGER, RELATIVE_TRIGGER, DEADMAN_TRIGGER)
+                        str: Or(THRESHOLD_STREAM_TRIGGER, THRESHOLD_BATCH_TRIGGER, RELATIVE_TRIGGER, DEADMAN_TRIGGER)
                     }, lambda triggers: len(triggers) > 0)
                 }
             }
diff --git a/src/service/clmcservice/alertsapi/tests.py b/src/service/clmcservice/alertsapi/tests.py
index c77b74bb0c8b66e819107cece0f86042bafa1868..62a33ccee73c8fdd8d61ab71012e03253b56c7dd 100644
--- a/src/service/clmcservice/alertsapi/tests.py
+++ b/src/service/clmcservice/alertsapi/tests.py
@@ -37,7 +37,7 @@ from toscaparser.tosca_template import ToscaTemplate
 
 # CLMC-service imports
 from clmcservice.alertsapi.utilities import adjust_tosca_definitions_import, SFEMC
-from clmcservice.alertsapi.alerts_specification_schema import validate_clmc_alerts_specification
+from clmcservice.alertsapi.alerts_specification_schema import validate_clmc_alerts_specification, STREAM_TYPE, BATCH_TYPE
 from clmcservice.alertsapi.views import AlertsConfigurationAPI
 from clmcservice import ROOT_DIR
 
@@ -136,7 +136,7 @@ class TestAlertsConfigurationAPI(object):
                 if not isfile(alert_config_abs_path):
                     continue  # skip directories
 
-                print(alert_config_abs_path, valid_expected)
+                print(alert_config_abs_path, "Is valid:", valid_expected)
 
                 with open(alert_config_abs_path, 'r') as fh:
                     yaml_content = load(fh)
@@ -145,6 +145,7 @@ class TestAlertsConfigurationAPI(object):
                 # do not catch exceptions here since we are testing the clmc validator, the tosca parsing is tested in the previous test method
                 alert_tosca_spec = ToscaTemplate(yaml_dict_tpl=yaml_content)
                 valid_real, err = validate_clmc_alerts_specification(alert_tosca_spec.tpl, include_error=True)
+                print("Validation error:\n==========\n{0}\n==========".format(err))
                 assert valid_expected == valid_real, "CLMC alerts specification validator test failed for file: {0}".format(alert_config_abs_path)
 
     def test_alerts_config_api_post(self, app_config):
@@ -615,13 +616,12 @@ def extract_alert_configuration_data(alert_spec, sfemc_fqdn, sfemc_port):
         for trigger in policy.triggers:
             trigger_id = trigger.name
             event_type = trigger.trigger_tpl["event_type"]
-            alert_period_integer = trigger.trigger_tpl["condition"]["granularity"]
 
             topic_id = "{0}\n{1}\n{2}\n{3}".format(sfc, sfc_instance, policy_id, trigger_id)
             topic_id = AlertsConfigurationAPI.get_hash(topic_id)
 
             alert_id = topic_id
-            alert_type = get_alert_type(event_type, alert_period_integer)
+            alert_type = get_alert_type(event_type, trigger.trigger_tpl.get("metadata", {}))
 
             alert_handlers = {}
             for handler_url in trigger.trigger_tpl["action"]["implementation"]:
@@ -638,23 +638,23 @@ def extract_alert_configuration_data(alert_spec, sfemc_fqdn, sfemc_port):
     return sfc, sfc_instance, alerts
 
 
-def get_alert_type(event_type, alert_period):
+def get_alert_type(event_type, trigger_metadata):
     """
-    Retrieve the alert type (stream ot batch) based on the event type and alert period.
+    Retrieve the alert type (stream or batch) based on the event type and the trigger metadata.
 
     :param event_type: event type, e.g. threshold, relative, deadman, etc.
-    :param alert_period: the alert period
+    :param trigger_metadata: metadata for this trigger
 
     :return: "batch" or "stream"
     """
 
     if event_type in AlertsConfigurationAPI.DUAL_VERSION_TEMPLATES:
-        if alert_period < AlertsConfigurationAPI.STREAM_PERIOD_LIMIT:
-            return "stream"
-        else:
-            return "batch"
+        assert "monitoring_type" in trigger_metadata, "Dual version trigger type doesn't have monitoring_type field in its metadata"
+        monitoring_type = trigger_metadata.get("monitoring_type")
+        assert monitoring_type in {STREAM_TYPE, BATCH_TYPE}, "Monitoring type must be either stream or batch"
+        return monitoring_type
     else:
-        events = {"relative": "batch", "deadman": "stream"}
+        events = {"relative": BATCH_TYPE, "deadman": STREAM_TYPE}
         return events[event_type]
 
 
diff --git a/src/service/clmcservice/alertsapi/views.py b/src/service/clmcservice/alertsapi/views.py
index 6c7a28c200b52523c52a4c4f5d08a4e1a89c35a2..c27f0db5d8c38b40e5e93f3d1032a4f9e0c353ae 100644
--- a/src/service/clmcservice/alertsapi/views.py
+++ b/src/service/clmcservice/alertsapi/views.py
@@ -36,7 +36,7 @@ from requests import post, get, delete
 
 # CLMC-service imports
 from clmcservice.alertsapi.utilities import adjust_tosca_definitions_import, TICKScriptTemplateFiller, fill_http_post_handler_vars, get_resource_spec_policy_triggers, get_alert_spec_policy_triggers
-from clmcservice.alertsapi.alerts_specification_schema import COMPARISON_OPERATORS, SFEMC, validate_clmc_alerts_specification
+from clmcservice.alertsapi.alerts_specification_schema import STREAM_TYPE, BATCH_TYPE, COMPARISON_OPERATORS, SFEMC, validate_clmc_alerts_specification
 
 # initialise logger
 log = logging.getLogger('service_logger')
@@ -48,8 +48,6 @@ class AlertsConfigurationAPI(object):
     A class-based view for configuring alerts within CLMC.
     """
 
-    STREAM_PERIOD_LIMIT = 60  # if alert period is less than 60 seconds, then a stream template is used, otherwise use batch
-
     DUAL_VERSION_TEMPLATES = {"threshold"}  # this set defines all template types that are written in two versions (stream and batch)
 
     KAPACITOR_TASK_API_PREFIX = "/kapacitor/v1/tasks"
@@ -521,21 +519,26 @@ class AlertsConfigurationAPI(object):
 
                 condition = trigger.trigger_tpl["condition"]
                 critical_value = float(condition["threshold"])
-                alert_period_integer = condition["granularity"]
-                alert_period = "{0}s".format(alert_period_integer)
-                influx_function = condition.get("aggregation_method", "mean")  # if not specified, use "mean"
 
-                # check for tag filtering
-                where_clause = None
+                # get granularity and aggregation method - both default to None since not every alert type requires these fields
+                alert_period_integer = condition.get("granularity")
+                if alert_period_integer is not None:
+                    alert_period = "{0}s".format(alert_period_integer)
+                else:
+                    alert_period = None
+                influx_function = condition.get("aggregation_method")
+
+                # check for tag filtering (optional, if not specified include the sfc and sfc instance tags only)
+                tags = {}
                 if "resource_type" in trigger.trigger_tpl["condition"]:
                     tags = condition["resource_type"]
-                    # make sure alert tasks are executing with queries for the given sfc and sfc instance - automatically add those tags using the metadata values
-                    tags["flame_sfc"] = sfc
-                    tags["flame_sfci"] = sfc_instance
+                # make sure alert tasks are executing with queries for the given sfc and sfc instance - automatically add those tags using the metadata values
+                tags["flame_sfc"] = sfc
+                tags["flame_sfci"] = sfc_instance
 
-                    # build up the where clause from the tags dictionary
-                    where_filters_list = map(lambda tag_name: '"{0}"=\'{1}\''.format(tag_name, tags[tag_name]), tags)
-                    where_clause = " AND ".join(where_filters_list)
+                # build up the where clause from the tags dictionary
+                where_filters_list = map(lambda tag_name: '"{0}"=\'{1}\''.format(tag_name, tags[tag_name]), tags)
+                where_clause = " AND ".join(where_filters_list)
 
                 comparison_operator = COMPARISON_OPERATORS.get(condition.get("comparison_operator"))  # if not specified, the comparison operator will be set to None
 
@@ -546,10 +549,13 @@ class AlertsConfigurationAPI(object):
 
                 # check whether the template needs to be a stream or a batch
                 if event_type in self.DUAL_VERSION_TEMPLATES:
-                    if alert_period_integer < self.STREAM_PERIOD_LIMIT:
+                    monitoring_type = trigger_metadata.get("monitoring_type")
+                    assert monitoring_type in {STREAM_TYPE, BATCH_TYPE}, "Alerts schema validation failed - dual version alert types should specify monitoring type as stream or batch in the metadata."
+
+                    if monitoring_type == STREAM_TYPE:
                         template_id = "{0}-stream-template".format(event_type)
                         event_type = "{0}_stream".format(event_type)
-                    else:
+                    elif monitoring_type == BATCH_TYPE:
                         template_id = "{0}-batch-template".format(event_type)
                         event_type = "{0}_batch".format(event_type)
 
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-1.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-1.yaml
index 03515cfdd0052e5ae42d6c69e61b28423fc7447d..b6e9768a25e18cd0c91e9e4f724242da44f8bfff 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-1.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-1.yaml
@@ -12,44 +12,6 @@ metadata:
 topology_template:
 
   policies:
-    - high_latency_policy:
-        type: eu.ict-flame.policies.Alert
-        triggers:
-          high_latency:
-            description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
-            event_type: threshold
-            metric: network.latency
-            condition:
-              threshold: 45
-              granularity: 120
-              aggregation_method: mean
-              resource_type:
-                flame_location: watershed
-              comparison_operator: gt
-            action:
-              implementation:
-                - flame_sfemc
-                - http://companyA.alert-handler.flame.eu/high-latency
-    - requests_diff_policy:
-        type: eu.ict-flame.policies.Alert
-        triggers:
-          increase_in_requests:
-            description: |
-              This event triggers when the number of requests has increased relative to the number of requests received
-              120 seconds ago.
-            event_type: relative
-            metric: storage.requests
-            condition:
-              threshold: 100
-              granularity: 120
-              resource_type:
-                flame_sfp: storage
-                flame_sf: storage-users
-                flame_location: watershed
-              comparison_operator: gte
-            action:
-              implementation:
-              - http://sfemc.flame.eu/notify
     - low_requests_policy:
         type: eu.ict-flame.policies.Alert
         triggers:
@@ -57,6 +19,8 @@ topology_template:
             description: |
               This event triggers when the last reported number of requests for a given service function
               falls behind a given threshold.
+            metadata:
+              monitoring_type: batch
             event_type: threshold
             metric: storage.requests
             condition:
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-10.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-10.yaml
index f17a69769262a100995d3b5ff00f58054217f5b7..1fe844e596314a5621cbafaba641459c29b8bac3 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-10.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-10.yaml
@@ -7,29 +7,10 @@ imports:
 
 metadata:
   sfc_ID: companyA-VR # correct format is servicefunctionchain, not sfc_ID
-#  sfci_ID: companyA-VR-premium # correct format is sfci, not sfci_ID
 
 topology_template:
 
   policies:
-    - high_latency_policy:
-        type: eu.ict-flame.policies.Alert
-        triggers:
-          high_latency:
-            description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
-            event_type: threshold
-            metric: network.latency
-            condition:
-              threshold: 45
-              granularity: 120
-              aggregation_method: mean
-              resource_type:
-                flame_location: watershed
-              comparison_operator: gt
-            action:
-              implementation:
-                - flame_sfemc
-                - http://companyA.alert-handler.flame.eu/high-latency
     - requests_diff_policy:
         type: eu.ict-flame.policies.Alert
         triggers:
@@ -40,8 +21,9 @@ topology_template:
             event_type: relative
             metric: storage.requests
             condition:
-              threshold: -100  # requests have decreased by at least 100
+              threshold: -100
               granularity: 120
+              aggregation_method: mean
               resource_type:
                 flame_sfp: storage
                 flame_sf: storage-users
@@ -50,25 +32,3 @@ topology_template:
             action:
               implementation:
               - flame_sfemc
-    - low_requests_policy:
-        type: eu.ict-flame.policies.Alert
-        triggers:
-          low_requests:
-            description: |
-              This event triggers when the last reported number of requests for a given service function
-              falls behind a given threshold.
-            event_type: threshold
-            metric: storage.requests
-            condition:
-              threshold: 5
-              granularity: 60
-              aggregation_method: last
-              resource_type:
-                flame_sfp: storage
-                flame_sf: storage-users
-                flame_location: watershed
-              comparison_operator: lt
-            action:
-              implementation:
-                - flame_sfemc
-                - http://companyA.alert-handler.flame.eu/low-requests
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-11.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-11.yaml
index 59bd2f6757e2c8048617474b705e8feee2631bd4..dce58de733976d7427969f6f733882dce92fa0b8 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-11.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-11.yaml
@@ -7,8 +7,6 @@ imports:
 
 metadata:
   servicefunctionchain: companyA-VR
-  sfc: companyA-VR  # correct metadata field is servicefunctionchain
-#  sfci: companyA-VR-premium
 
 topology_template:
 
@@ -18,11 +16,13 @@ topology_template:
         triggers:
           high_latency:
             description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
+            metadata:
+              monitoring_type: batch
             event_type: threshold
             metric: network.latency
             condition:
               threshold: 45
-              granularity: 120
+              # batch alert should specify granularity
               aggregation_method: mean
               resource_type:
                 flame_location: watershed
@@ -31,45 +31,3 @@ topology_template:
               implementation:
                 - flame_sfemc
                 - http://companyA.alert-handler.flame.eu/high-latency
-    - requests_diff_policy:
-        type: eu.ict-flame.policies.Alert
-        triggers:
-          decrease_in_requests:
-            description: |
-              This event triggers when the number of requests has decreased relative to the number of requests received
-              120 seconds ago.
-            event_type: relative
-            metric: storage.requests
-            condition:
-              threshold: -100  # requests have decreased by at least 100
-              granularity: 120
-              resource_type:
-                flame_sfp: storage
-                flame_sf: storage-users
-                flame_location: watershed
-              comparison_operator: lte
-            action:
-              implementation:
-              - http://sfemc.flame.eu/notify
-    - low_requests_policy:
-        type: eu.ict-flame.policies.Alert
-        triggers:
-          low_requests:
-            description: |
-              This event triggers when the last reported number of requests for a given service function
-              falls behind a given threshold.
-            event_type: threshold
-            metric: storage.requests
-            condition:
-              threshold: 5
-              granularity: 60
-              aggregation_method: last
-              resource_type:
-                flame_sfp: storage
-                flame_sf: storage-users
-                flame_location: watershed
-              comparison_operator: lt
-            action:
-              implementation:
-                - http://sfemc.flame.eu/notify
-                - http://companyA.alert-handler.flame.eu/low-requests
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-12.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-12.yaml
index 0c2a659f528a0da4f51700e8e7c7968575d65366..2b2e644d4dd33577632011c4f23b7e2dce51876e 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-12.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-12.yaml
@@ -7,7 +7,6 @@ imports:
 
 metadata:
   servicefunctionchain: companyA-VR
-  sfci: companyA-VR-premium   # not allowed - sfc instance tag is automatically added in the filters
 
 topology_template:
 
@@ -17,12 +16,13 @@ topology_template:
         triggers:
           high_latency:
             description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
+            metadata:
+              monitoring_type: stream
             event_type: threshold
             metric: network.latency
             condition:
               threshold: 45
-              granularity: 120
-              aggregation_method: mean
+              granularity: 120  # cannot have a stream trigger with granularity
               resource_type:
                 flame_location: watershed
               comparison_operator: gt
@@ -30,45 +30,3 @@ topology_template:
               implementation:
                 - flame_sfemc
                 - http://companyA.alert-handler.flame.eu/high-latency
-    - requests_diff_policy:
-        type: eu.ict-flame.policies.Alert
-        triggers:
-          decrease_in_requests:
-            description: |
-              This event triggers when the number of requests has decreased relative to the number of requests received
-              120 seconds ago.
-            event_type: relative
-            metric: storage.requests
-            condition:
-              threshold: -100  # requests have decreased by at least 100
-              granularity: 120
-              resource_type:
-                flame_sfp: storage
-                flame_sf: storage-users
-                flame_location: watershed
-              comparison_operator: lte
-            action:
-              implementation:
-              - http://sfemc.flame.eu/notify
-    - low_requests_policy:
-        type: eu.ict-flame.policies.Alert
-        triggers:
-          low_requests:
-            description: |
-              This event triggers when the last reported number of requests for a given service function
-              falls behind a given threshold.
-            event_type: threshold
-            metric: storage.requests
-            condition:
-              threshold: 5
-              granularity: 60
-              aggregation_method: last
-              resource_type:
-                flame_sfp: storage
-                flame_sf: storage-users
-                flame_location: watershed
-              comparison_operator: lt
-            action:
-              implementation:
-                - http://sfemc.flame.eu/notify
-                - http://companyA.alert-handler.flame.eu/low-requests
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-13.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-13.yaml
index cb3ff637b7288346b27ab0f1af749d959e0057cf..1faf97ad686fcf2dda7ac31d68680098f2c6924f 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-13.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-13.yaml
@@ -30,45 +30,3 @@ topology_template:
               implementation:
                 - flame_sfemc
                 - http://companyA.alert-handler.flame.eu/high-latency
-    - requests_diff_policy:
-        type: eu.ict-flame.policies.Alert
-        triggers:
-          decrease_in_requests:
-            description: |
-              This event triggers when the number of requests has decreased relative to the number of requests received
-              120 seconds ago.
-            event_type: relative
-            metric: storage.requests
-            condition:
-              threshold: -100
-              granularity: 120
-              resource_type:
-                flame_sfp: storage
-                flame_sf: storage-users
-                flame_location: watershed
-              comparison_operator: lte
-            action:
-              implementation:
-              - http://sfemc.flame.eu/notify
-    - low_requests_policy:
-        type: eu.ict-flame.policies.Alert
-        triggers:
-          low_requests:
-            description: |
-              This event triggers when the last reported number of requests for a given service function
-              falls behind a given threshold.
-            event_type: threshold
-            metric: storage.requests
-            condition:
-              threshold: 5
-              granularity: 60
-              aggregation_method: last
-              resource_type:
-                flame_sfp: storage
-                flame_sf: storage-users
-                flame_location: watershed
-              comparison_operator: lt
-            action:
-              implementation:
-                - http://sfemc.flame.eu/notify
-                - http://companyA.alert-handler.flame.eu/low-requests
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-14.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-14.yaml
index 313203b31eb07090047af168f4688127982ac518..4124cac82a1381849fd6b57552079688856259fb 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-14.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-14.yaml
@@ -11,24 +11,6 @@ metadata:
 topology_template:
 
   policies:
-    - high_latency_policy:
-        type: eu.ict-flame.policies.Alert
-        triggers:
-          high_latency:
-            description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
-            event_type: threshold
-            metric: network.latency
-            condition:
-              threshold: 45
-              granularity: 120
-              aggregation_method: mean
-              resource_type:
-                flame_location: watershed
-              comparison_operator: gt
-            action:
-              implementation:
-                - flame_sfemc
-                - http://companyA.alert-handler.flame.eu/high-latency
     - requests_diff_policy:
         type: eu.ict-flame.policies.Alert
         triggers:
@@ -43,6 +25,7 @@ topology_template:
             condition:
               threshold: 5
               granularity: 120
+              aggregation_method: first
               resource_type:
                 flame_sfp: storage
                 flame_sf: storage-users
@@ -51,25 +34,3 @@ topology_template:
             action:
               implementation:
               - http://sfemc.flame.eu/notify
-    - low_requests_policy:
-        type: eu.ict-flame.policies.Alert
-        triggers:
-          low_requests:
-            description: |
-              This event triggers when the last reported number of requests for a given service function
-              falls behind a given threshold.
-            event_type: threshold
-            metric: storage.requests
-            condition:
-              threshold: 5
-              granularity: 60
-              aggregation_method: last
-              resource_type:
-                flame_sfp: storage
-                flame_sf: storage-users
-                flame_location: watershed
-              comparison_operator: lt
-            action:
-              implementation:
-                - http://sfemc.flame.eu/notify
-                - http://companyA.alert-handler.flame.eu/low-requests
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-15.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-15.yaml
index c09719754cfb952f8dcb5125796d9ae564588bfd..3b0f3be2ae27dcdf3411649d934f63e2781c0342 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-15.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-15.yaml
@@ -11,24 +11,6 @@ metadata:
 topology_template:
 
   policies:
-    - high_latency_policy:
-        type: eu.ict-flame.policies.Alert
-        triggers:
-          high_latency:
-            description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
-            event_type: threshold
-            metric: network.latency
-            condition:
-              threshold: 45
-              granularity: 120
-              aggregation_method: mean
-              resource_type:
-                flame_location: watershed
-              comparison_operator: gt
-            action:
-              implementation:
-                - flame_sfemc
-                - http://companyA.alert-handler.flame.eu/high-latency
     - requests_diff_policy:
         type: eu.ict-flame.policies.Alert
         triggers:
@@ -43,6 +25,7 @@ topology_template:
             condition:
               threshold: 5
               granularity: 120
+              aggregation_method: median
               resource_type:
                 flame_sfp: storage
                 flame_sf: storage-users
@@ -51,25 +34,3 @@ topology_template:
             action:
               implementation:
               - http://sfemc.flame.eu/notify
-    - low_requests_policy:
-        type: eu.ict-flame.policies.Alert
-        triggers:
-          low_requests:
-            description: |
-              This event triggers when the last reported number of requests for a given service function
-              falls behind a given threshold.
-            event_type: threshold
-            metric: storage.requests
-            condition:
-              threshold: 5
-              granularity: 60
-              aggregation_method: last
-              resource_type:
-                flame_sfp: storage
-                flame_sf: storage-users
-                flame_location: watershed
-              comparison_operator: lt
-            action:
-              implementation:
-                - http://sfemc.flame.eu/notify
-                - http://companyA.alert-handler.flame.eu/low-requests
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-16.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-16.yaml
index f3026f86f11e0565ad99ca0bfd0ec84891256cba..ee5953a5a638b5b31173d69cd26513d9c006d048 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-16.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-16.yaml
@@ -25,6 +25,7 @@ topology_template:
             condition:
               threshold: 5
               granularity: 120
+              aggregation_method: mode
               resource_type:
                 flame_sfp: storage
                 flame_sf: storage-users
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-17.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-17.yaml
index 966a2f8fc35bb170871e4ba4e747b4e570607a5a..c609c21bdd6386f10f69769156e95fb8f12839f8 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-17.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-17.yaml
@@ -17,6 +17,8 @@ topology_template:
           high_latency:
             description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
             event_type: threshold
+            metadata:
+              monitoring_type: batch
             metric: network.latency
             condition:
               threshold: 45
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-2.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-2.yaml
index b0fe2f0e3f49f2bafd92d578322f076488eae328..98cfb088cc224446c26287d023fb8bc9bea01c80 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-2.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-2.yaml
@@ -7,26 +7,10 @@ imports:
 
 metadata:
   servicefunctionchain: companyA-VR
-#  sfci: companyA-VR-premium
 
 topology_template:
 
   policies:
-    - missing_measurement_policy:
-        type: eu.ict-flame.policies.Alert
-        triggers:
-          missing_storage_measurements:
-            description: This event triggers when the number of storage measurements reported falls below the threshold value.
-            event_type: deadman
-            metric: storage.*
-            condition:
-              threshold: 0
-              granularity: 60
-              resource_type:
-                flame_sfp: storage
-            action:
-              implementation:
-                - flame_sfemc
     - low_requests_policy:
         type: eu.ict-flame.policies.Alert
         triggers:
@@ -34,6 +18,8 @@ topology_template:
             description: |
               This event triggers when the last reported number of requests for a given service function
               falls behind a given threshold.
+            metadata:
+              monitoring_type: batch
             event_type: threshold
             metric: storage.requests
             condition:
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-3.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-3.yaml
index 24890361628a94e1a86e9a23c96fc52a57fdbe62..67eb4407fc39a34eeb3378c17491687bd9bdc2a1 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-3.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-3.yaml
@@ -7,7 +7,6 @@ imports:
 
 metadata:
   servicefunctionchain: companyA-VR
-#  sfci: companyA-VR-premium
 
 topology_template:
 
@@ -19,12 +18,12 @@ topology_template:
             description: |
               This event triggers when the last reported number of requests for a given service function
               falls behind a given threshold.
+            metadata:
+              monitoring_type: stream
             event_type: threshold
             metric: storage.requests
             condition:
               threshold: 5
-              granularity: 60
-              aggregation_method: last
               resource_type:
                 status_code: 200 # integers must be quoted as strings when used as tag values, e.g. '200'
                 flame_sf: storage-users
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-4.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-4.yaml
index afc91e9ceb9900d22c32c563fd932cfb4e0c6e2e..27ba984c7d00b1695659ba11cc69bccd494db8d7 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-4.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-4.yaml
@@ -7,7 +7,6 @@ imports:
 
 metadata:
   servicefunctionchain: companyA-VR
-#  sfci: companyA-VR-premium
 
 topology_template:
 
@@ -19,7 +18,7 @@ topology_template:
             description: |
               This event triggers when the last reported number of requests for a given service function
               falls behind a given threshold.
-            event_type: threshold
+            event_type: relative
             metric: storage.requests
             condition:
               threshold: 5
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-5.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-5.yaml
index bab46dac841d5b5072bbe8137f3449113514f126..55546d32b86dc85b99617d8a2ab8ea9454c23543 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-5.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-5.yaml
@@ -7,7 +7,6 @@ imports:
 
 metadata:
   servicefunctionchain: companyA-VR
-#  sfci: companyA-VR-premium
 
 topology_template:
 
@@ -19,12 +18,11 @@ topology_template:
             description: |
               This event triggers when the last reported number of requests for a given service function
               falls behind a given threshold.
-            event_type: threshold
-            metric: storage.requests
+            event_type: deadman
+            metric: storage.*
             condition:
               threshold: 5
               granularity: 60
-              aggregation_method: mean
               resource_type:
                 flame_sfp: storage
                 flame_sf: storage-users
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-6.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-6.yaml
index 51ddff4a6eaeffee3458362e75e49c7d5883af06..c5c3d5b74154e170bea1f33df278622a5199d1b4 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-6.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-6.yaml
@@ -7,7 +7,6 @@ imports:
 
 metadata:
   servicefunctionchain: companyA-VR
-#  sfci: companyA-VR-premium
 
 topology_template:
 
@@ -17,6 +16,8 @@ topology_template:
         triggers:
           high_latency:
             description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
+            metadata:
+              monitoring_type: batch
             event_type: threshold
             metric: network-latency  # should be in format measurement.field - e.g. network.latency
             condition:
@@ -30,25 +31,4 @@ topology_template:
             action:
               implementation:
                 - flame_sfemc
-                - http://companyA.alert-handler.flame.eu/high-latency
-    - low_requests_policy:
-        type: eu.ict-flame.policies.Alert
-        triggers:
-          low_requests:
-            description: |
-              This event triggers when the last reported number of requests for a given service function
-              falls behind a given threshold.
-            event_type: threshold
-            metric: storage.requests
-            condition:
-              threshold: 5
-              granularity: 60
-              aggregation_method: last
-              resource_type:
-                flame_sfp: storage
-                flame_sf: storage-users
-                flame_location: watershed
-              comparison_operator: lt
-            action:
-              implementation:
                 - http://companyA.alert-handler.flame.eu/high-latency
\ No newline at end of file
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-7.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-7.yaml
index e411c31f34c08b50f10e44aaf0a0998b4841bf1b..041457fbffe4dcf9a1d1a05dcfaaf34a1a77a8df 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-7.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-7.yaml
@@ -7,7 +7,6 @@ imports:
 
 metadata:
   servicefunctionchain: companyA-VR
-#  sfci: companyA-VR-premium
 
 topology_template:
 
@@ -16,8 +15,7 @@ topology_template:
         type: eu.ict-flame.policies.Alert
         triggers:
           high_latency:
-            description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
-            event_type: threshold-template # should be one of ("threshold", "relative", "deadman")
+            event_type: relative-template # should be one of ("threshold", "relative", "deadman")
             metric: network.latency
             condition:
               threshold: 45
@@ -30,40 +28,3 @@ topology_template:
               implementation:
                 - flame_sfemc
                 - http://companyA.alert-handler.flame.eu/high-latency
-    - low_requests_policy:
-        type: eu.ict-flame.policies.Alert
-        triggers:
-          low_requests:
-            description: |
-              This event triggers when the last reported number of requests for a given service function
-              falls behind a given threshold.
-            event_type: threshold
-            metric: storage.requests
-            condition:
-              threshold: 5
-              granularity: 60
-              aggregation_method: last
-              resource_type:
-                flame_sfp: storage
-                flame_sf: storage-users
-                flame_location: watershed
-              comparison_operator: lt
-            action:
-              implementation:
-                - flame_sfemc
-                - http://companyA.alert-handler.flame.eu/low-requests
-    - missing_measurement_policy:
-        type: eu.ict-flame.policies.Alert
-        triggers:
-          missing_storage_measurements:
-            description: This event triggers when the number of storage measurements reported falls below the threshold value.
-            event_type: deadman
-            metric: storage.*
-            condition:
-              threshold: 0
-              granularity: 60
-              resource_type:
-                flame_sfp: storage
-            action:
-              implementation:
-              - http://sfemc.flame.eu/notify
\ No newline at end of file
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-8.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-8.yaml
index ab7db8a692215e6e8e9abff5085b75d682d754b4..5eb9abf7885c091e197421f12a2a7328b7646df4 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-8.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-8.yaml
@@ -7,7 +7,6 @@ imports:
 
 metadata:
   servicefunctionchain: companyA-VR
-#  sfci: companyA-VR-premium
 
 topology_template:
 
@@ -15,63 +14,4 @@ topology_template:
     - high_latency_policy:
         type: eu.ict-flame.policies.Alert
         triggers:
-          # should specify at least 1 trigger
-    - low_requests_policy:
-        type: eu.ict-flame.policies.Alert
-        triggers:
-          low_requests:
-            description: |
-              This event triggers when the last reported number of requests for a given service function
-              falls behind a given threshold.
-            event_type: threshold
-            metric: storage.requests
-            condition:
-              threshold: 5
-              granularity: 60
-              aggregation_method: last
-              resource_type:
-                flame_sfp: storage
-                flame_sf: storage-users
-                flame_location: watershed
-              comparison_operator: lt
-            action:
-              implementation:
-                - flame_sfemc
-                - http://companyA.alert-handler.flame.eu/low-requests
-    - requests_diff_policy:
-        type: eu.ict-flame.policies.Alert
-        triggers:
-          increase_in_requests:
-            description: |
-              This event triggers when the number of requests has increased relative to the number of requests received
-              120 seconds ago.
-            event_type: relative
-            metric: storage.requests
-            condition:
-              threshold: 100  # requests have increased by at least 100
-              granularity: 120
-              resource_type:
-                flame_sfp: storage
-                flame_sf: storage-users
-                flame_location: watershed
-              comparison_operator: gte
-            action:
-              implementation:
-              - http://sfemc.flame.eu/notify
-          decrease_in_requests:
-            description: |
-              This event triggers when the number of requests has decreased relative to the number of requests received
-              120 seconds ago.
-            event_type: relative
-            metric: storage.requests
-            condition:
-              threshold: -100  # requests have decreased by at least 100
-              granularity: 120
-              resource_type:
-                flame_sfp: storage
-                flame_sf: storage-users
-                flame_location: watershed
-              comparison_operator: lte
-            action:
-              implementation:
-              - flame_sfemc
\ No newline at end of file
+          # should specify at least 1 trigger
\ No newline at end of file
diff --git a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-9.yaml b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-9.yaml
index 15e50edd9a60ae250dd597de2c59c5b0654ad825..8a56d5436b0fe8de85699bc4d593327730dcc83e 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-9.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/invalid/alerts_test_config-9.yaml
@@ -10,24 +10,6 @@ imports:
 topology_template:
 
   policies:
-    - high_latency_policy:
-        type: eu.ict-flame.policies.Alert
-        triggers:
-          high_latency:
-            description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
-            event_type: threshold
-            metric: network.latency
-            condition:
-              threshold: 45
-              granularity: 120
-              aggregation_method: mean
-              resource_type:
-                flame_location: watershed
-              comparison_operator: gt
-            action:
-              implementation:
-                - flame_sfemc
-                - http://companyA.alert-handler.flame.eu/high-latency
     - low_requests_policy:
         type: eu.ict-flame.policies.Alert
         triggers:
@@ -35,6 +17,8 @@ topology_template:
             description: |
               This event triggers when the last reported number of requests for a given service function
               falls behind a given threshold.
+            metadata:
+              monitoring_type: batch
             event_type: threshold
             metric: storage.requests
             condition:
diff --git a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-1.yaml b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-1.yaml
index 750a0740178cc4f8f1d69f3efdd817f419466f26..9af108e28676792ba41e9860be51ef439656e86a 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-1.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-1.yaml
@@ -39,6 +39,8 @@ topology_template:
             description: |
               This event triggers when the last reported number of requests for a given service function
               falls behind a given threshold.
+            metadata:
+              monitoring_type: batch
             event_type: threshold
             metric: storage.requests
             condition:
diff --git a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-2.yaml b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-2.yaml
index 47007c7b1ae8d801c4da524b5b7a8892c26f0659..db8cb626a43f45841a2291815bc551241ea7124a 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-2.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-2.yaml
@@ -16,11 +16,12 @@ topology_template:
         triggers:
           high_latency:
             # optional description - hence, valid
+            metadata:
+              monitoring_type: stream
             event_type: threshold
             metric: network.latency
             condition:
               threshold: 45
-              granularity: 120
               resource_type:
                 flame_location: watershed
               comparison_operator: eq
diff --git a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-3.yaml b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-3.yaml
index 27f3d72fc487f220e72c7d750d54bb148455a92e..5f22eb28ed25c21dbf0ee6a4b8768f74860aaecf 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-3.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-3.yaml
@@ -16,12 +16,14 @@ topology_template:
         triggers:
           high_latency:
             # optional description - hence, valid
+            metadata:
+              monitoring_type: stream
             event_type: threshold
             metric: network.latency
             condition:
               threshold: 45
-              granularity: 120
-              # aggregation_method is optional, default value is "mean"
+              # granularity is not needed when monitoring type is set to stream
+              # aggregation_method is not needed when monitoring type is set to stream
               resource_type:
                 flame_location: watershed
               comparison_operator: neq
@@ -34,6 +36,8 @@ topology_template:
         triggers:
           low_requests:
             # optional description - hence, valid
+            metadata:
+              monitoring_type: batch
             event_type: threshold
             metric: storage.requests
             condition:
diff --git a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-4.yaml b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-4.yaml
index d5ae1608c954a4bf293d89114e774eec239cea66..7fa77c8bbd3811e794914c17fc2e1b5cb63c134a 100644
--- a/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-4.yaml
+++ b/src/service/resources/tosca/test-data/clmc-validator/valid/alerts_test_config-4.yaml
@@ -16,6 +16,8 @@ topology_template:
         triggers:
           high_latency:
             # optional description - hence, valid
+            metadata:
+              monitoring_type: batch
             event_type: threshold
             metric: network.latency
             condition:
@@ -33,6 +35,8 @@ topology_template:
         triggers:
           low_requests:
             # optional description - hence, valid
+            metadata:
+              monitoring_type: batch
             event_type: threshold
             metric: storage.requests
             condition:
diff --git a/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-1.yaml b/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-1.yaml
index ec4238022ff60c0a6da4c15563af2463d964d6ed..b9423cad7e64002d9fedc8ad6257ee290a8ad1f7 100644
--- a/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-1.yaml
+++ b/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-1.yaml
@@ -17,6 +17,8 @@ topology_template:
         triggers:
           high_latency:
             description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
+            metadata:
+              monitoring_type: batch
             event_type: threshold
             metric: network.latency
             condition:
diff --git a/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-3.yaml b/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-3.yaml
index b11a1d1d5fec39536b24566b5d217df5e127a824..2eca25e127e143fa358e37e679984c11e70a8bd9 100644
--- a/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-3.yaml
+++ b/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-3.yaml
@@ -17,12 +17,12 @@ topology_template:
         triggers:
           high_latency:
             description: This event triggers when the mean network latency in a given location exceeds a given threshold (in ms).
+            metadata:
+              monitoring_type: stream
             event_type: threshold
             metric: network.latency
             condition:
               threshold: 45
-              granularity: 120
-              aggregation_method: mean
               comparison_operator: gt
             action:
               implementation:
diff --git a/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-4.yaml b/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-4.yaml
index 9e4d7a50627874966f6756d84b179f16d7b9ff51..1fdc35b2fedb654409f172f0d53d4f8c622f1829 100644
--- a/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-4.yaml
+++ b/src/service/resources/tosca/test-data/tosca-parser/valid/alerts_test_config-4.yaml
@@ -38,6 +38,8 @@ topology_template:
         type: eu.ict-flame.policies.Alert
         triggers:
           increase_in_requests:
+            metadata:
+              percentage_evaluation: true
             event_type: relative
             metric: storage.requests
             condition:
diff --git a/src/test/VERSION b/src/test/VERSION
index faf16644b2ebd4f9c9013ded49ba054d5fe10a28..35237d8a2fef63869780db5f0c2a9e7f27023e15 100644
--- a/src/test/VERSION
+++ b/src/test/VERSION
@@ -1 +1 @@
-__version__ = "2.4.0"
\ No newline at end of file
+__version__ = "2.4.1"
\ No newline at end of file
diff --git a/src/test/clmctest/alerts/alerts_test_config.yaml b/src/test/clmctest/alerts/alerts_test_config.yaml
index 7bf3407128fc7e386d4e308d68b8513a047da9b0..238355c2b169cdba56c55c5dd3a2dbe3773dba6b 100644
--- a/src/test/clmctest/alerts/alerts_test_config.yaml
+++ b/src/test/clmctest/alerts/alerts_test_config.yaml
@@ -34,12 +34,12 @@ topology_template:
                 - http://172.40.231.200:9999/
           increase_in_running_processes:
             description: This event triggers when the max number of running processes increases.
+            metadata:
+              monitoring_type: stream
             event_type: threshold
             metric: processes.running
             condition:
               threshold: 0
-              granularity: 10
-              aggregation_method: max
               resource_type:
                 flame_location: DC1
                 flame_sfp: nginx
@@ -48,6 +48,23 @@ topology_template:
               implementation:
                 - flame_sfemc
                 - http://172.40.231.200:9999/
+          increase_in_running_processes_batch:
+            description: This event triggers when the max number of running processes increases. This trigger uses batch mode for monitoring.
+            metadata:
+              monitoring_type: batch
+            event_type: threshold
+            metric: processes.running
+            condition:
+              threshold: 0
+              granularity: 30
+              aggregation_method: max
+              resource_type:
+                flame_location: DC1
+                flame_sfp: nginx
+              comparison_operator: gte
+            action:
+              implementation:
+                - http://172.40.231.200:9999/
           increase_in_active_requests:
             description: This event triggers when the number of nginx accept requests increases.
             event_type: relative
diff --git a/src/test/clmctest/alerts/test_alerts.py b/src/test/clmctest/alerts/test_alerts.py
index e87b8255dda0bdb4e963bc11117341ce44e4aa55..467c0969a40ee4adc7fffc93a7f0b03b8e3b3e87 100644
--- a/src/test/clmctest/alerts/test_alerts.py
+++ b/src/test/clmctest/alerts/test_alerts.py
@@ -59,6 +59,12 @@ expected_alerts_list = [
      "task_api_endpoint": "/kapacitor/v1/tasks/f5edaeb27fb847116be749c3815d240cbf0d7ba79aee1959daf0b3445a70f2c8",
      "topic_api_endpoint": "/kapacitor/v1/alerts/topics/f5edaeb27fb847116be749c3815d240cbf0d7ba79aee1959daf0b3445a70f2c8",
      "topic_handlers_api_endpoint": "/kapacitor/v1/alerts/topics/f5edaeb27fb847116be749c3815d240cbf0d7ba79aee1959daf0b3445a70f2c8/handlers"},
+    {"policy": "scale_nginx_policy", "trigger": "increase_in_running_processes_batch", "task_identifier": "0b5a63415f4e38e3bad89cf9f8c9094f7ef59497800154432283f3e193776ce3",
+     "handlers": ["http://172.40.231.200:9999/"],
+     "topic_identifier": "0b5a63415f4e38e3bad89cf9f8c9094f7ef59497800154432283f3e193776ce3",
+     "task_api_endpoint": "/kapacitor/v1/tasks/0b5a63415f4e38e3bad89cf9f8c9094f7ef59497800154432283f3e193776ce3",
+     "topic_api_endpoint": "/kapacitor/v1/alerts/topics/0b5a63415f4e38e3bad89cf9f8c9094f7ef59497800154432283f3e193776ce3",
+     "topic_handlers_api_endpoint": "/kapacitor/v1/alerts/topics/0b5a63415f4e38e3bad89cf9f8c9094f7ef59497800154432283f3e193776ce3/handlers"},
     {"policy": "deadman_policy", "trigger": "no_measurements", "task_identifier": "f7dab6fd53001c812d44533d3bbb6ef45f0d1d39b9441bc3c60402ebda85d320",
      "handlers": [SFEMC, "http://172.40.231.200:9999/"],
      "topic_identifier": "f7dab6fd53001c812d44533d3bbb6ef45f0d1d39b9441bc3c60402ebda85d320",
@@ -73,6 +79,11 @@ expected_alerts_list = [
      "topic_handlers_api_endpoint": "/kapacitor/v1/alerts/topics/2707cb9c0397c1aae0f831d5893aa769c6eaeb8834c974f2c14eb2c60be5bd73/handlers"}
 ]
 
+expected_deleted_alerts = [{"policy": alert_object["policy"], "trigger": alert_object["trigger"]} for alert_object in expected_alerts_list]
+expected_deleted_alerts = sorted(expected_deleted_alerts, key=lambda x: x['trigger'])
+expected_deleted_handlers = [{"policy": alert_object["policy"], "trigger": alert_object["trigger"], "handler": handler} for alert_object in expected_alerts_list for handler in alert_object["handlers"]]
+expected_deleted_handlers = sorted(expected_deleted_handlers, key=lambda x: (x['handler'], x['trigger']))
+
 
 def is_valid_timestamp(str_timestamp):
     try:
@@ -228,8 +239,11 @@ class TestAlerts(object):
             assert response.status_code == 200
             sleep(0.25)
 
-        print("Waiting for Kapacitor to trigger alerts, timeouts in 90 seconds...")
         counter = 0
+        # allow up to 120 seconds (counter_limit * sleep_period) to pass before giving up due to timeout
+        counter_limit = 24
+        sleep_period = 5
+        print("Waiting for Kapacitor to trigger alerts, timeouts in {0} seconds...".format(counter_limit * sleep_period))
 
         expected_alerts_count = len(expected_alerts_list)
         while True:
@@ -237,11 +251,11 @@ class TestAlerts(object):
             print("Checking count of alert log files: actual count {0}, expected count {1}. Created alert logs: {2}".format(len(alert_logs), expected_alerts_count, alert_logs))
             if len(alert_logs) == expected_alerts_count:
                 break
-            elif counter == 18:
-                print("90 seconds have passed...")
+            elif counter == counter_limit:
+                print("{0} seconds have passed...".format(counter_limit * sleep_period))
                 break
 
-            sleep(5)
+            sleep(sleep_period)
             counter += 1
 
         alert_logs = listdir(LOG_TEST_FOLDER_PATH)
@@ -273,20 +287,9 @@ class TestAlerts(object):
 
         json_response = response.json()
         # sort by trigger to ensure comparison order is correct
-        assert sorted(json_response["deleted_alerts"], key=lambda x: x['trigger']) == [{"policy": "scale_nginx_policy", "trigger": "high_requests"}, {"policy": "scale_nginx_policy", "trigger": "increase_in_active_requests"},
-                                                                                       {"policy": "scale_nginx_policy", "trigger": "increase_in_running_processes"}, {"policy": "deadman_policy", "trigger": "no_measurements"},
-                                                                                       {"policy": "deadman_policy", "trigger": "service_started"}], \
-            "Incorrect list of deleted alerts"
+        assert sorted(json_response["deleted_alerts"], key=lambda x: x['trigger']) == expected_deleted_alerts, "Incorrect list of deleted alerts"
         # sort by handler and trigger to ensure comparison order is correct
-        assert sorted(json_response["deleted_handlers"], key=lambda x: (x['handler'], x['trigger'])) == [{"policy": "scale_nginx_policy", "trigger": "increase_in_active_requests", "handler": SFEMC},
-                                                                                                         {"policy": "scale_nginx_policy", "trigger": "increase_in_running_processes", "handler": SFEMC},
-                                                                                                         {"policy": "deadman_policy", "trigger": "no_measurements", "handler": SFEMC},
-                                                                                                         {"policy": "scale_nginx_policy", "trigger": "high_requests", "handler": "http://172.40.231.200:9999/"},
-                                                                                                         {"policy": "scale_nginx_policy", "trigger": "increase_in_active_requests", "handler": "http://172.40.231.200:9999/"},
-                                                                                                         {"policy": "scale_nginx_policy", "trigger": "increase_in_running_processes", "handler": "http://172.40.231.200:9999/"},
-                                                                                                         {"policy": "deadman_policy", "trigger": "no_measurements", "handler": "http://172.40.231.200:9999/"},
-                                                                                                         {"policy": "deadman_policy", "trigger": "service_started", "handler": "http://172.40.231.200:9999/"}], \
-            "Incorrect list of deleted handlers"
+        assert sorted(json_response["deleted_handlers"], key=lambda x: (x['handler'], x['trigger'])) == expected_deleted_handlers, "Incorrect list of deleted handlers"
 
     def test_alerts_update_request(self, rspec_config):
         """