Skip to content
Snippets Groups Projects
Commit dd658d34 authored by Michael Boniface's avatar Michael Boniface
Browse files

Merge branch 'pytest' into 'integration'

Pytest

See merge request FLAME/flame-clmc!23
parents f0cd015d e6106461
No related branches found
No related tags found
No related merge requests found
#!/bin/bash
#/////////////////////////////////////////////////////////////////////////
#//
#// (c) University of Southampton IT Innovation Centre, 2017
#//
#// Copyright in this software belongs to University of Southampton
#// IT Innovation Centre of Gamma House, Enterprise Road,
#// Chilworth Science Park, Southampton, SO16 7NS, UK.
#//
#// This software may not be used, sold, licensed, transferred, copied
#// or reproduced in whole or in part in any manner or form or in or
#// on any media by any person other than in accordance with the terms
#// of the Licence Agreement supplied with the software, or otherwise
#// without the prior written consent of the copyright owners.
#//
#// This software is distributed WITHOUT ANY WARRANTY, without even the
#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
#// PURPOSE, except where stated in the Licence Agreement supplied with
#// the software.
#//
#// Created By : Michael Boniface
#// Created Date : 19/03/2018
#// Created for Project : FLAME
#//
#/////////////////////////////////////////////////////////////////////////
# Locations of the Telegraf configuration this script installs.
TELEGRAF_CONF_DIR="/etc/telegraf"
TELEGRAF_CONF_FILE="$TELEGRAF_CONF_DIR/telegraf.conf"
TELEGRAF_INCLUDE_CONF_DIR="$TELEGRAF_CONF_DIR/telegraf.d"
TELEGRAF_OUTPUT_CONF_FILE="$TELEGRAF_INCLUDE_CONF_DIR/telegraf_output.conf"

echo "Checking Telegraf installation"

# Check the target telegraf directory exists
if [ ! -d "$TELEGRAF_CONF_DIR" ]; then
    echo "Error: Telegraf conf directory does not exist on target machine. Check that telegraf is installed $TELEGRAF_CONF_DIR"
    exit 1
fi

# Check the telegraf include directory exists.
# FIX: the expansion is now quoted (the original left it bare), so the test
# cannot word-split or glob if the path ever contains special characters —
# consistent with the quoted test above.
if [ ! -d "$TELEGRAF_INCLUDE_CONF_DIR" ]; then
    echo "Error: Telegraf conf include directory does not exist on target machine. Check that telegraf is installed $TELEGRAF_INCLUDE_CONF_DIR"
    exit 1
fi
# Copy configuration
# Write the general Telegraf agent configuration. The heredoc delimiter is
# quoted ('EOF'), so the $PLACEHOLDER tags below are written literally into
# the file — they are not expanded by this shell. (Presumably substituted
# later by the deployment tooling or Telegraf's own env expansion — confirm.)
echo "Telegraf general config file: " $TELEGRAF_CONF_FILE
cat <<'EOF' > $TELEGRAF_CONF_FILE
[global_tags]
location="$LOCATION"
sfc="$SFC_ID"
sfc_i="$SFC_ID_INSTANCE"
sf="$SF_ID"
sf_i="$SF_ID_INSTANCE"
ipendpoint="$IP_ENDPOINT_ID"
[agent]
interval = "10s"
round_interval = true
metric_buffer_limit = 1000
flush_buffer_when_full = true
collection_jitter = "0s"
flush_interval = "10s"
flush_jitter = "0s"
debug = false
quiet = false
logfile = "/var/log/telegraf/telegraf.log"
hostname = ""
EOF
# Write the InfluxDB output plugin configuration into the include directory.
# As above, the quoted heredoc keeps $INFLUXDB_URL / $DATABASE_NAME as
# literal placeholder text in the generated file.
echo "Telegraf output config file: " $TELEGRAF_OUTPUT_CONF_FILE
cat <<'EOF' > $TELEGRAF_OUTPUT_CONF_FILE
[[outputs.influxdb]]
urls = ["$INFLUXDB_URL"]
database = "$DATABASE_NAME"
precision = "s"
timeout = "5s"
EOF
......@@ -99,7 +99,7 @@ Press the Data Explorer in the menu and select the nginx measurement and create
## KPI triggers
In this demonstrator an example KPI rule has been set up in Kapacitor which fires when the average requests per second on the Apache 1 or Apache2 server goes above certain thresholds ( a 'warning' at 0.2 requests/second and a 'critical' message at 0.5 requests/second ). The TICKscript specification for this rule is as follows:
In this demonstrator an example KPI rule has been set up in Kapacitor which fires when the average number of active connections per 5 seconds on the Nginx 1 or Nginx 2 server goes above a certain threshold (a 'warning' at 10 connections per 5 seconds). The TICKscript specification for this rule is as follows:
```
dbrp "CLMCMetrics"."autogen"
......
......@@ -5,10 +5,11 @@ from time import sleep
from queue import Queue
from xml.etree import ElementTree
from urllib.parse import urljoin
from os.path import isfile
from os.path import isfile, dirname, join
from os import remove, system
import pytest
import requests
import json
class TestStreamingAlerts(object):
......@@ -16,17 +17,30 @@ class TestStreamingAlerts(object):
A testing class used to group all the tests related to the streaming scenario.
"""
@pytest.mark.parametrize("log", ["/tmp/RPSLoad.log"])
def test_alerts(self, log, streaming_url, streaming_manifest):
kapacitor_url = "http://localhost:9092/kapacitor/v1/tasks"
@pytest.mark.parametrize("rule, log", [
("rules.json", "/tmp/RPSLoad.log"),
])
def test_alerts(self, rule, log, streaming_url, streaming_manifest):
"""
This test case generates some streaming requests to the server to ensure an alert is triggered and then tests the log file for this alert. Different logs can be tested by
appending to the list of parameters in the pytest decorator
appending to the list of parameters in the pytest decorator.
Format for pytest parameters under test:
([filename], [log])
where [filename] is the name of the json file for the rule under test (must be in the same folder as this test is)
[log] is the absolute path of the log file that must be created due to an alert
:param rule: the name of the rule json file
:param log: the path of the log file that is under test
:param streaming_url: the fixture providing the streaming url for this test case
:param streaming_manifest: the fixture providing the root of the XML streaming manifest
"""
kapacitor_setter = self.kapacitor_setting(rule)
next(kapacitor_setter) # Setup the test rule
try:
if isfile(log):
remove(log) # delete log file if existing from previous tests
......@@ -57,6 +71,30 @@ class TestStreamingAlerts(object):
print("\nSuccessfully passed alert creation test.\n")
next(kapacitor_setter) # Teardown the test rule
def kapacitor_setting(self, rule):
    """
    A generator providing setUp/tearDown actions for a particular kapacitor rule.

    First ``next()`` registers the rule with Kapacitor (setUp); the second
    ``next()`` deletes it again (tearDown). Interleaving the two phases with
    the calling test is achieved through the generator pattern.

    :param rule: the name of the json file for the rule under test, located
        in the same directory as this test module
    """
    # setUp: load the rule definition and collapse it onto a single line
    # before posting it to Kapacitor's task endpoint.
    rule_path = join(dirname(__file__), rule)
    with open(rule_path, "r") as fh:
        payload = "".join(ln.strip() for ln in fh)
    task_id = json.loads(payload).get("id")
    delete_url = urljoin(self.kapacitor_url + "/", task_id)

    # A task with the same ID may linger from a previous run — remove it
    # first so the POST below starts from a clean state.
    requests.delete(url=delete_url)
    requests.post(url=self.kapacitor_url, data=payload, headers={"Content-Type": "application/json"})
    yield

    # tearDown: remove the rule that was registered for this test.
    requests.delete(url=delete_url)
    yield
@staticmethod
@pytest.fixture(scope="class", params=[{"server": "http://192.168.50.11", "video": "/test_video/stream.mpd"}])
def streaming_url(request):
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment