From 6fa8a0e9571ae94328f651109cf2dfa7ce5fda43 Mon Sep 17 00:00:00 2001 From: MJB <mjb@it-innovation.soton.ac.uk> Date: Sun, 25 Mar 2018 16:50:20 +0100 Subject: [PATCH] Restructure pytest package to use correct module names and provide setup.py to build a python package distribution, needed to more effectively reuse tests in the platform project build/test process --- .githooks/post-commit | 2 + MANIFEST.in | 2 + clmctest/__init__.py | 1 + clmctest/inputs/__init__.py | 1 + clmctest/inputs/conftest.py | 34 + clmctest/inputs/rspec.yml | 103 +++ clmctest/inputs/test_rspec.py | 43 ++ clmctest/inputs/test_telegraf_agents.py | 76 ++ clmctest/monitoring/LineProtocolGenerator.py | 300 ++++++++ clmctest/monitoring/StreamingSim.py | 292 ++++++++ clmctest/monitoring/__init__.py | 1 + clmctest/monitoring/conftest.py | 36 + clmctest/monitoring/rspec.yml | 52 ++ clmctest/monitoring/test_rspec.py | 40 + clmctest/monitoring/test_simresults.py | 49 ++ .../scripts/.pytest_cache/v/cache/lastfailed | 3 + clmctest/scripts/__init__.py | 1 + clmctest/scripts/rspec.yml | 7 + clmctest/scripts/test_config_telegraf.py | 88 +++ clmctest/services/apache/install.sh | 33 + clmctest/services/apache/telegraf_apache.conf | 19 + clmctest/services/ffmpeg/install.sh | 15 + clmctest/services/ffmpeg/telegraf_ffmpeg.conf | 15 + clmctest/services/ffmpeg/transcode.sh | 34 + clmctest/services/host/install.sh | 28 + clmctest/services/host/telegraf_host.conf | 80 ++ clmctest/services/ipendpoint/install.sh | 28 + .../ipendpoint/telegraf_ipendpoint.conf | 15 + .../services/loadtest-streaming/install.sh | 30 + .../telegraf_loadtest_streaming.conf | 112 +++ clmctest/services/mongo/install.sh | 33 + clmctest/services/mongo/telegraf_mongo.conf | 15 + clmctest/services/nginx/install.sh | 52 ++ clmctest/services/nginx/nginx.conf | 14 + clmctest/services/nginx/telegraf_nginx.conf | 7 + clmctest/services/pytest/install.sh | 30 + clmctest/services/vlc/install.sh | 29 + clmctest/streaming/__init__.py | 1 + 
clmctest/streaming/conftest.py | 20 + clmctest/streaming/dashboard.json | 1 + clmctest/streaming/influx.json | 7 + clmctest/streaming/kapacitor.conf | 699 ++++++++++++++++++ clmctest/streaming/kapacitor.json | 6 + clmctest/streaming/manual.md | 146 ++++ clmctest/streaming/report.sh | 25 + clmctest/streaming/rspec.yml | 64 ++ clmctest/streaming/rules.json | 9 + clmctest/streaming/run.sh | 57 ++ clmctest/streaming/setupCLMC.sh | 50 ++ clmctest/streaming/setupNGINX.sh | 41 + clmctest/streaming/stop.sh | 4 + clmctest/streaming/test_rspec.py | 41 + clmctest/streaming/test_streaming.py | 177 +++++ setup.py | 21 + 54 files changed, 3089 insertions(+) create mode 100644 .githooks/post-commit create mode 100644 MANIFEST.in create mode 100644 clmctest/__init__.py create mode 100644 clmctest/inputs/__init__.py create mode 100644 clmctest/inputs/conftest.py create mode 100644 clmctest/inputs/rspec.yml create mode 100644 clmctest/inputs/test_rspec.py create mode 100644 clmctest/inputs/test_telegraf_agents.py create mode 100644 clmctest/monitoring/LineProtocolGenerator.py create mode 100644 clmctest/monitoring/StreamingSim.py create mode 100644 clmctest/monitoring/__init__.py create mode 100644 clmctest/monitoring/conftest.py create mode 100644 clmctest/monitoring/rspec.yml create mode 100644 clmctest/monitoring/test_rspec.py create mode 100644 clmctest/monitoring/test_simresults.py create mode 100644 clmctest/scripts/.pytest_cache/v/cache/lastfailed create mode 100644 clmctest/scripts/__init__.py create mode 100644 clmctest/scripts/rspec.yml create mode 100644 clmctest/scripts/test_config_telegraf.py create mode 100644 clmctest/services/apache/install.sh create mode 100644 clmctest/services/apache/telegraf_apache.conf create mode 100644 clmctest/services/ffmpeg/install.sh create mode 100644 clmctest/services/ffmpeg/telegraf_ffmpeg.conf create mode 100644 clmctest/services/ffmpeg/transcode.sh create mode 100644 clmctest/services/host/install.sh create mode 100644 
clmctest/services/host/telegraf_host.conf create mode 100644 clmctest/services/ipendpoint/install.sh create mode 100644 clmctest/services/ipendpoint/telegraf_ipendpoint.conf create mode 100644 clmctest/services/loadtest-streaming/install.sh create mode 100644 clmctest/services/loadtest-streaming/telegraf_loadtest_streaming.conf create mode 100644 clmctest/services/mongo/install.sh create mode 100644 clmctest/services/mongo/telegraf_mongo.conf create mode 100644 clmctest/services/nginx/install.sh create mode 100644 clmctest/services/nginx/nginx.conf create mode 100644 clmctest/services/nginx/telegraf_nginx.conf create mode 100644 clmctest/services/pytest/install.sh create mode 100644 clmctest/services/vlc/install.sh create mode 100644 clmctest/streaming/__init__.py create mode 100644 clmctest/streaming/conftest.py create mode 100644 clmctest/streaming/dashboard.json create mode 100644 clmctest/streaming/influx.json create mode 100644 clmctest/streaming/kapacitor.conf create mode 100644 clmctest/streaming/kapacitor.json create mode 100644 clmctest/streaming/manual.md create mode 100644 clmctest/streaming/report.sh create mode 100644 clmctest/streaming/rspec.yml create mode 100644 clmctest/streaming/rules.json create mode 100644 clmctest/streaming/run.sh create mode 100644 clmctest/streaming/setupCLMC.sh create mode 100644 clmctest/streaming/setupNGINX.sh create mode 100644 clmctest/streaming/stop.sh create mode 100644 clmctest/streaming/test_rspec.py create mode 100644 clmctest/streaming/test_streaming.py create mode 100644 setup.py diff --git a/.githooks/post-commit b/.githooks/post-commit new file mode 100644 index 0000000..eb805fe --- /dev/null +++ b/.githooks/post-commit @@ -0,0 +1,2 @@ +#!/bin/bash +git describe --all --long > git-commit-version.txt \ No newline at end of file diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..993dbd0 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,2 @@ +include MANIFEST.in +recursive-include clmctest *.yml 
*.sh \ No newline at end of file diff --git a/clmctest/__init__.py b/clmctest/__init__.py new file mode 100644 index 0000000..44f7725 --- /dev/null +++ b/clmctest/__init__.py @@ -0,0 +1 @@ +#!/usr/bin/python3 \ No newline at end of file diff --git a/clmctest/inputs/__init__.py b/clmctest/inputs/__init__.py new file mode 100644 index 0000000..44f7725 --- /dev/null +++ b/clmctest/inputs/__init__.py @@ -0,0 +1 @@ +#!/usr/bin/python3 \ No newline at end of file diff --git a/clmctest/inputs/conftest.py b/clmctest/inputs/conftest.py new file mode 100644 index 0000000..0a1be8f --- /dev/null +++ b/clmctest/inputs/conftest.py @@ -0,0 +1,34 @@ +#!/usr/bin/python3 + +import pytest +import yaml +import pkg_resources +from influxdb import InfluxDBClient + + +@pytest.fixture(scope="module") +def telegraf_agent_config(request): + """ + Reads the service configuration deployed for the streaming simulation test. + + :param request: access the parameters of the fixture + :return: the python object representing the read YAML file + """ + rspec = pkg_resources.resource_filename('clmctest.inputs', 'rspec.yml') + print("rspec file: {0}".format(rspec)) + with open(rspec, 'r') as stream: + data_loaded = yaml.load(stream) + return data_loaded + + +@pytest.fixture(params=[{'database': 'CLMCMetrics'}], scope='module') +def influxdb(telegraf_agent_config, request): + """ + Creates an Influx DB client for the CLMC metrics database + + :param telegraf_agent_config: the fixture returning the yaml configuration + :param request: access the parameters of the fixture + :return: the created Influx DB client + """ + + return InfluxDBClient(host=telegraf_agent_config['hosts'][0]['ip_address'], port=8086, database=request.param['database'], timeout=10) diff --git a/clmctest/inputs/rspec.yml b/clmctest/inputs/rspec.yml new file mode 100644 index 0000000..6dad4be --- /dev/null +++ b/clmctest/inputs/rspec.yml @@ -0,0 +1,103 @@ +hosts: + - name: clmc-service + cpus: 1 + memory: 2048 + disk: "10GB" + 
forward_ports: + - guest: 8086 + host: 8086 + - guest: 8888 + host: 8888 + - guest: 9092 + host: 9092 + ip_address: "192.168.50.10" + - name: apache + cpus: 1 + memory: 2048 + disk: "10GB" + service_name: "apache" + forward_ports: + - guest: 80 + host: 8881 + ip_address: "192.168.50.11" + location: "DC1" + sfc_id: "MS_Template_1" + sfc_id_instance: "MS_I1" + sf_id: "adaptive_streaming" + sf_id_instance: "adaptive_streaming_I1" + ipendpoint_id: "adaptive_streaming_I1_apache1" + influxdb_url: "http://192.168.50.10:8086" + database_name: "CLMCMetrics" + - name: nginx + cpus: 1 + memory: 2048 + disk: "10GB" + service_name: "nginx" + forward_ports: + - guest: 80 + host: 8082 + ip_address: "192.168.50.13" + location: "DC1" + sfc_id: "MS_Template_1" + sfc_id_instance: "MS_I1" + sf_id: "adaptive_streaming" + sf_id_instance: "adaptive_streaming_nginx_I1" + ipendpoint_id: "adaptive_streaming_nginx_I1_apache1" + influxdb_url: "http://192.168.50.10:8086" + database_name: "CLMCMetrics" + - name: mongo + cpus: 1 + memory: 2048 + disk: "10GB" + service_name: "mongo" + forward_ports: + - guest: 80 + host: 8083 + ip_address: "192.168.50.14" + location: "DC1" + sfc_id: "MS_Template_1" + sfc_id_instance: "MS_I1" + sf_id: "metadata_database" + sf_id_instance: "metadata_database_I1" + ipendpoint_id: "metadata_database_I1_apache1" + influxdb_url: "http://192.168.50.10:8086" + database_name: "CLMCMetrics" + - name: ffmpeg + cpus: 1 + memory: 2048 + disk: "10GB" + service_name: "ffmpeg" + forward_ports: + - guest: 80 + host: 8084 + ip_address: "192.168.50.15" + location: "DC1" + sfc_id: "MS_Template_1" + sfc_id_instance: "MS_I1" + sf_id: "metadata_database" + sf_id_instance: "metadata_database_I1" + ipendpoint_id: "metadata_database_I1_apache1" + influxdb_url: "http://192.168.50.10:8086" + database_name: "CLMCMetrics" + - name: host + cpus: 1 + memory: 2048 + disk: "10GB" + service_name: "host" + forward_ports: + - guest: 80 + host: 8085 + ip_address: "192.168.50.16" + location: "DC1" + 
sfc_id: "MS_Template_1" + sfc_id_instance: "MS_I1" + sf_id: "adaptive_streaming" + sf_id_instance: "adaptive_streaming_I1" + ipendpoint_id: "adaptive_streaming_I1_apache1" + influxdb_url: "http://192.168.50.10:8086" + database_name: "CLMCMetrics" + - name: test-runner + cpus: 1 + memory: 2048 + disk: "10GB" + ip_address: "192.168.50.17" \ No newline at end of file diff --git a/clmctest/inputs/test_rspec.py b/clmctest/inputs/test_rspec.py new file mode 100644 index 0000000..5442eed --- /dev/null +++ b/clmctest/inputs/test_rspec.py @@ -0,0 +1,43 @@ +#!/usr/bin/python3 + +from subprocess import run +from platform import system +import pytest + + +@pytest.mark.parametrize("service_name", [ + 'clmc-service', + 'apache', + 'nginx', + 'mongo', + 'ffmpeg', + 'host' +]) +def test_service_name(telegraf_agent_config, service_name): + """ + Tests the service names in the configuration. + + :param telegraf_agent_config: the configuration fixture collected from conftest.py + :param service_name the service name to test + """ + + assert any(s['name'] == service_name for s in telegraf_agent_config['hosts']), "{0} not in list of hosts".format(service_name) + print("\nSuccessfully passed configuration test for service name {0}\n".format(service_name)) + + +def test_ping(telegraf_agent_config): + """ + Pings each service to test for liveliness + + :param telegraf_agent_config: the configuration fixture collected from conftest.py + """ + + print("\n") # blank line printed for formatting purposes + + ping_count = 1 + system_dependent_param = "-n" if system().lower() == "windows" else "-c" + + for service in telegraf_agent_config['hosts']: + command = ["ping", system_dependent_param, str(ping_count), service['ip_address']] + assert run(command).returncode == 0, "Service ping test failed for {0} with ip address {1}".format(service['name'], service['ip_address']) + print("\nSuccessfully passed ping test for service: {0}\n".format(service['name'])) diff --git 
a/clmctest/inputs/test_telegraf_agents.py b/clmctest/inputs/test_telegraf_agents.py new file mode 100644 index 0000000..b2e8ce5 --- /dev/null +++ b/clmctest/inputs/test_telegraf_agents.py @@ -0,0 +1,76 @@ +#!/usr/bin/python3 + +import pytest +from subprocess import run +from platform import system +from influxdb import InfluxDBClient + +@pytest.mark.parametrize("service_name", [ + ('clmc-service'), + ('apache'), + ('nginx'), + ('mongo'), + ('ffmpeg'), + ('host'), + ]) +def test_service_name(telegraf_agent_config, service_name): + assert any(s['name'] == service_name for s in telegraf_agent_config['hosts']), "{0} not in list of hosts".format(service_name) + +def test_ping(telegraf_agent_config): + """ + Pings each service to test for liveliness + + :param streaming_sim_config: the configuration fixture collected from conftest.py + """ + + print("\n") # blank line printed for formatting purposes + + ping_count = 1 + system_dependent_param = "-n" if system().lower() == "windows" else "-c" + + for service in telegraf_agent_config['hosts']: + command = ["ping", system_dependent_param, str(ping_count), service['ip_address']] + assert run(command).returncode == 0, "Service ping test failed for {0} with ip address {1}".format(service['name'], service['ip_address']) + print("\nSuccessfully passed ping test for service: {0}\n".format(service['name'])) + + +@pytest.mark.parametrize("measurement, query, expected_result", [ + ('nginx', 'SELECT mean("requests") AS "mean" FROM "CLMCMetrics"."autogen"."nginx"', 0), + ('cpu', 'SELECT mean("usage_idle") AS "mean" FROM "CLMCMetrics"."autogen"."cpu"', 0), + ('mongodb', 'SELECT mean("net_in_bytes") AS "mean" FROM "CLMCMetrics"."autogen"."mongodb"', 0), + ('net', 'SELECT mean("bytes_sent") AS "mean" FROM "CLMCMetrics"."autogen"."net"', 0), + ('disk', 'SELECT mean("free") AS "mean" FROM "CLMCMetrics"."autogen"."disk"', 0), + ('diskio', 'SELECT mean("write_bytes") AS "mean" FROM "CLMCMetrics"."autogen"."diskio"', 0), + ('mem', 'SELECT 
mean("free") AS "mean" FROM "CLMCMetrics"."autogen"."mem"', 0) + ]) +def test_all_inputs(influxdb, measurement, query, expected_result): + """ + Tests measurements are received from an input plugin aggregated across all services + + :param influxdb: the influx db client fixture + :param measurement: the measurement to test + :param query: the query to execute + :param expected_result: the expected result from the query + """ + + query_result = influxdb.query('SHOW measurements ON "CLMCMetrics"') + points = list(query_result.get_points()) + assert any(p['name'] == measurement for p in points), "{0} not in measurement list".format(measurement) + + query_result = influxdb.query(query) + points = next(query_result.get_points()) # get_points() returns a generator, to take the first element we can use the next() function + actual_result = points['mean'] + assert actual_result > expected_result, "actual result {0} is not > expected result {1} for query {2}".format(actual_result, str(expected_result), query) + + +@pytest.mark.parametrize("query, expected_result", + [('filter query', 0), + ('filter query', 0), + ('filter query', 0) + ]) +def test_global_tag_filtering(influxdb, query, expected_result): + """Tests that the global tags are inserted correctly into the global configuration using the install CLMC script + """ + # run query + # check result + assert 1 diff --git a/clmctest/monitoring/LineProtocolGenerator.py b/clmctest/monitoring/LineProtocolGenerator.py new file mode 100644 index 0000000..1b19c3c --- /dev/null +++ b/clmctest/monitoring/LineProtocolGenerator.py @@ -0,0 +1,300 @@ +#!/usr/bin/python3 + +# line protocol + +# Method to create a full InfluxDB request statement (based on partial statement from client) +import uuid +from random import randint + + +# Reports TX and RX, scaling on requested quality +def generate_network_report(recieved_bytes, sent_bytes, time): + result = [{"measurement": "net_port_io", + "tags": { + "port_id": "enps03" + }, + "fields": { 
+ "RX_BYTES_PORT_M": recieved_bytes, + "TX_BYTES_PORT_M": sent_bytes + }, + "time": _getNSTime(time) + }] + + return result + + +# Formats VM config +def generate_vm_config(state, cpu, mem, storage, time): + result = [{"measurement": "vm_res_alloc", + "tags": { + "vm_state": state + }, + "fields": { + "cpu": cpu, + "memory": mem, + "storage": storage + }, + "time": _getNSTime(time) + }] + + return result + + +# Reports cpu usage, scaling on requests +def generate_cpu_report(cpu_usage, cpu_active_time, cpu_idle_time, time): + result = [{"measurement": "cpu_usage", + "fields": { + "cpu_usage": cpu_usage, + "cpu_active_time": cpu_active_time, + "cpu_idle_time": cpu_idle_time + }, + "time": _getNSTime(time) + }] + + return result + + +# Reports response times, scaling on number of requests +def generate_mpegdash_report(resource, requests, avg_response_time, peak_response_time, time): + result = [{"measurement": "mpegdash_service", + "tags": { + "cont_nav": resource + }, + "fields": { + "requests": requests, + "avg_response_time": avg_response_time, + "peak_response_time": peak_response_time + }, + "time": _getNSTime(time) + }] + + return result + + +# ipendpoint_route,ipendpoint_id,cont_nav=FQDN HTTP_REQUESTS_FQDN_M, NETWORK_FQDN_LATENCY timestamp +def generate_ipendpoint_route(resource, requests, latency, time): + result = [{"measurement": "ipendpoint_route", + "tags": { + "cont_nav": str(resource) + }, + "fields": { + "http_requests_fqdn_m": requests, + "network_fqdn_latency": latency + }, + "time": _getNSTime(time) + }] + + return result + + +# InfluxDB likes to have time-stamps in nanoseconds +def _getNSTime(time): + # Convert to nano-seconds + timestamp = int(1000000000*time) + + return timestamp + + +# DEPRECATED +# ____________________________________________________________________________ + +# DEPRECATED: old structure, not part of new spec + +# Influx needs strings to be quoted, this provides a utility interface to do this +def quote_wrap(string): + return 
"\"" + string + "\"" + + +def _generateClientRequest(cReq, id, time): + # Tags first + result = 'sid="' + str(id) + '",' + cReq + + # Fields + # No additional fields here yet + + # Timestamp + result += ' ' + str(_getNSTime(time)) + + # Measurement + return 'request,' + result + + +# Method to create a full InfluxDB response statement +# DEPRECATED: old structure, not part of new spec +def _generateServerResponse(reqID, quality, time, cpuUsage, qualityDifference): + # Tags first + result = ' ' + + # Fields + result += 'quality=' + str(quality) + ',' + result += 'cpuUsage=' + str(cpuUsage) + ',' + result += 'qualityDifference=' + str(qualityDifference) + ',' + result += 'requestID="' + str(reqID) + '",' + result += 'index="' + str(uuid.uuid4()) + '"' + + # Timestamp + result += ' ' + str(_getNSTime(time)) + + # Measurement + # print('response'+result) + return 'response' + result + + +# Formats server config +def _generateServerConfig(ID, location, cpu, mem, storage, time): + # metric + result = 'host_resource' + # Tags + result += ',slice_id=' + quote_wrap(ID) + result += ',location=' + quote_wrap(location) + result += ' ' + # Fields + result += 'cpu=' + str(cpu) + result += ',memory=' + quote_wrap(mem) + result += ',storage=' + quote_wrap(storage) + + # Time + result += ' ' + str(_getNSTime(time)) + + print(result) + return result + + +# Format port config +def _configure_port(port_id, state, rate, time): + # metric + result = 'net_port_config ' + # Fields + result += 'port_id=' + quote_wrap('enps' + port_id) + result += ',port_state=' + quote_wrap(state) + result += ',tx_constraint=' + quote_wrap(rate) + result += ' ' + + # Time + result += ' ' + str(_getNSTime(time)) + + print(result) + return result + + +# Format service function config +def _configure_service_function(state, max_connected_clients): + # measurement + result = 'mpegdash_service_config' + # tags + result += ',service_state='+quote_wrap(state) + result += ' ' + # fields + result += 
'max_connected_clients='+str(max_connected_clients) + + return result + + +# Reports memory usage, scaling on requests +def generate_mem_report(requests, total_mem, time): + # Measurement + result = 'mem' + result += ' ' + # field + used = randint(0, min(100, 5*requests)) + available = 100-used + result += 'available_percent='+str(available) + result += ',used_percent='+str(used) + result += ',total='+str(total_mem) + result += ' ' + # Time + result += str(_getNSTime(time)) + print(result) + return result + + +# Formats compute node config +def generate_compute_node_config(slice_id, location, node_id, cpus, mem, storage, time): + # Measurement + result = 'compute_node_config' + # CommonContext Tag + result += ',slide_id='+quote_wrap(slice_id) + # Tag + result += ',location='+quote_wrap(location) + result += ',comp_node_id='+quote_wrap(node_id) + result += ' ' + # field + result += 'cpus='+str(cpus) + result += ',memory='+str(mem) + result += ',storage='+str(storage) + result += ' ' + # Time + result += str(_getNSTime(time)) + print(result) + return result + + +# Formats network resource config +def generate_network_resource_config(slice_id, network_id, bandwidth, time): + # Measurement + result = 'network_resource_config' + # Meta Tag + result += ',slice_id='+quote_wrap(slice_id) + # Tag + result += 'network_id='+quote_wrap(network_id) + result += ' ' + # field + result += 'bandwidth='+str(bandwidth) + result += ' ' + # Time + result += str(_getNSTime(time)) + print(result) + return result + + +# Formats network interface config +def generate_network_interface_config(slice_id, comp_node_id, port_id, rx_constraint, tx_constraint, time): + # Measurement + result = 'network_interface_config' + # Meta Tag + result += ',slice_id'+quote_wrap(slice_id) + # Tags + result += ',comp_node_id='+quote_wrap(comp_node_id) + result += ',port_id='+quote_wrap(port_id) + result += ' ' + # field + result += 'rx_constraint='+str(rx_constraint) + result += 
',tx_constraint='+str(tx_constraint) + result += ' ' + # Time + result += str(_getNSTime(time)) + print(result) + return result + + +# Format SF instance config +def generate_sf_instance_surrogate_config(loc, sfc, sfc_i, sf_package, sf_i, cpus, mem, storage, time): + # Measurement + result = 'sf_instance_surrogate_config' + # Meta Tag + result += ',location'+quote_wrap(loc) + result += ',sfc'+quote_wrap(sfc) + result += ',sfc_i'+quote_wrap(sfc_i) + result += ',sf_package'+quote_wrap(sf_package) + result += ',sf_i'+quote_wrap(sf_i) + result += ' ' + # field + result += 'cpus='+str(cpus) + result += ',memory='+str(mem) + result += ',storage='+str(storage) + result += ' ' + # Time + result += str(_getNSTime(time)) + print(result) + return result + + +# Formats context container as part of other line protocol generators +def service_function_measurement(measurement, service_function_context): + result = measurement + result += ',sfc'+quote_wrap(service_function_context.sfc) + result += ',sfc_i'+quote_wrap(service_function_context.sfc_i) + result += ',sf_package'+quote_wrap(service_function_context.sf_package) + result += ',sf_i'+quote_wrap(service_function_context.sf_i) + + return result diff --git a/clmctest/monitoring/StreamingSim.py b/clmctest/monitoring/StreamingSim.py new file mode 100644 index 0000000..223caf7 --- /dev/null +++ b/clmctest/monitoring/StreamingSim.py @@ -0,0 +1,292 @@ +#!/usr/bin/python3 + +import clmctest.monitoring.LineProtocolGenerator as lp +import time +import urllib.parse +import pytest +import random +import sys +from influxdb import InfluxDBClient + +# Simulation parameters +TICK_TIME = 1 +DEFAULT_REQUEST_RATE_INC = 1 +DEFAULT_REQUEST_RATE_INC_PERIOD = 10 +SIMULATION_TIME_SEC = 60 * 60 + +# CLMC parameters +INFLUX_DB_URL = 'http://172.23.1.20:8086' +INFLUX_DB_NAME = 'CLMCMetrics' +AGENT1_URL = 'http://172.23.1.21:8186' +AGENT2_URL = 'http://172.23.1.22:8186' + + +class Sim(object): + """ + Simulator for services + """ + + def __init__(self, 
influx_url, influx_db_name, agent1_url, agent2_url): + """ + Sets up the simulator object + + :param influx_url: the influx DB url + :param influx_db_name: the influx DB name + """ + + self.influx_db_name = influx_db_name + self.agent1_url = agent1_url + self.agent2_url = agent2_url + + # influx db client is created on initialisation, which will handle the influx DB queries + url_object = urllib.parse.urlparse(influx_url) + self.db_client = InfluxDBClient(host=url_object.hostname, port=url_object.port, database=self.influx_db_name, timeout=10) + + def reset(self): + """ + Resets the influx db by deleting the old database and creating a new one + """ + + # Teardown DB from previous sim and bring it back up + self.db_client.drop_database(self.influx_db_name) + self.db_client.create_database(self.influx_db_name) + + def run(self, simulation_length_seconds): + """ + Runs the simulation + + :param simulation_length_seconds: length of simulation + """ + + start_time = time.time() - SIMULATION_TIME_SEC + sim_time = start_time + + # segment_size : the length of video requested at a time + # bit_rate: MPEG-2 High 1080p 25fps = 80Mbps + ip_endpoints = [{'agent_url': self.agent1_url, 'location': 'DC1', 'cpu': 16, + 'mem': '8GB', 'storage': '1TB', 'request_queue': 0, 'request_arrival_rate': 0, + 'segment_size': 2, 'video_bit_rate': 80, 'packet_size': 1500}, + {'agent_url': self.agent2_url, 'location': 'DC2', 'cpu': 4, + 'mem': '8GB', 'storage': '1TB', 'request_queue': 0, 'request_arrival_rate': 0, + 'segment_size': 2, 'video_bit_rate': 80, 'packet_size': 1500} + ] + + # Simulate configuration of the ipendpoints + # endpoint state->mu, sigma, secs normal distribution + config_delay_dist = {"placing": [10, 0.68], "booting": [10, 0.68], "connecting": [10, 0.68]} + + print("\nSimulation started. 
Generating data...") + + # Place endpoints + max_delay = 0 + for ip_endpoint in ip_endpoints: + agent_url = urllib.parse.urlparse(ip_endpoint["agent_url"]) + agent_db_client = InfluxDBClient(host=agent_url.hostname, port=agent_url.port, database=self.influx_db_name, timeout=10) + delay_time = self._changeVMState(agent_db_client, sim_time, ip_endpoint, config_delay_dist['placing'][0], + config_delay_dist['placing'][0] * config_delay_dist['placing'][1], + 'placing', 'placed') + max_delay = max(delay_time, max_delay) + sim_time += max_delay + + # Boot endpoints + max_delay = 0 + for ip_endpoint in ip_endpoints: + agent_url = urllib.parse.urlparse(ip_endpoint["agent_url"]) + agent_db_client = InfluxDBClient(host=agent_url.hostname, port=agent_url.port, database=self.influx_db_name, timeout=10) + delay_time = self._changeVMState(agent_db_client, sim_time, ip_endpoint, config_delay_dist['booting'][0], + config_delay_dist['booting'][0] * config_delay_dist['booting'][1], + 'booting', 'booted') + max_delay = max(delay_time, max_delay) + sim_time += max_delay + + # Connect endpoints + max_delay = 0 + for ip_endpoint in ip_endpoints: + agent_url = urllib.parse.urlparse(ip_endpoint["agent_url"]) + agent_db_client = InfluxDBClient(host=agent_url.hostname, port=agent_url.port, database=self.influx_db_name, timeout=10) + delay_time = self._changeVMState(agent_db_client, sim_time, ip_endpoint, config_delay_dist['connecting'][0], + config_delay_dist['connecting'][0] * config_delay_dist['connecting'][1], + 'connecting', 'connected') + max_delay = max(delay_time, max_delay) + sim_time += max_delay + + request_arrival_rate_inc = DEFAULT_REQUEST_RATE_INC + inc_period_count = 0 + for i in range(simulation_length_seconds): + for ip_endpoint in ip_endpoints: + agent_url = urllib.parse.urlparse(ip_endpoint["agent_url"]) + agent_db_client = InfluxDBClient(host=agent_url.hostname, port=agent_url.port, database=self.influx_db_name, timeout=10) + + # linear inc to arrival rate + if 
inc_period_count >= DEFAULT_REQUEST_RATE_INC_PERIOD: + ip_endpoint['request_arrival_rate'] += request_arrival_rate_inc + inc_period_count = 0 + else: + inc_period_count += 1 + # add new requests to the queue + ip_endpoint['request_queue'] += ip_endpoint['request_arrival_rate'] + + # time to process one second of video (mS) in the current second + request_processing_time = int(random.normalvariate(10, 10 * 0.68)) + request_processing_time = max(request_processing_time, 10) + # time depends on the length of the segments in seconds + request_processing_time *= ip_endpoint['segment_size'] + + # amount of cpu time (mS) per tick + cpu_time_available = ip_endpoint['cpu'] * TICK_TIME * 1000 + max_requests_processed = int(cpu_time_available / request_processing_time) + # calc how many requests processed + if ip_endpoint['request_queue'] <= max_requests_processed: + # processed all of the requests + requests_processed = ip_endpoint['request_queue'] + else: + # processed the maximum number of requests + requests_processed = max_requests_processed + + # calculate cpu usage + cpu_active_time = int(requests_processed * request_processing_time) + cpu_idle_time = int(cpu_time_available - cpu_active_time) + cpu_usage = cpu_active_time / cpu_time_available + agent_db_client.write_points(lp.generate_cpu_report(cpu_usage, cpu_active_time, cpu_idle_time, sim_time)) + + # calc network usage metrics + bytes_rx = 2048 * requests_processed + bytes_tx = int( + ip_endpoint['video_bit_rate'] / 8 * 1000000 * requests_processed * ip_endpoint['segment_size']) + agent_db_client.write_points(lp.generate_network_report(bytes_rx, bytes_tx, sim_time)) + + # time to process all of the requests in the queue + peak_response_time = ip_endpoint['request_queue'] * request_processing_time / ip_endpoint['cpu'] + # mid-range + avg_response_time = (peak_response_time + request_processing_time) / 2 + agent_db_client.write_points(lp.generate_mpegdash_report('http://localhost/server-status?auto', 
ip_endpoint['request_arrival_rate'], + avg_response_time, peak_response_time, sim_time)) + + # need to calculate this but sent at 5mS for now + network_request_delay = 0.005 + + # calculate network response delays (2km link, 100Mbps) + network_response_delay = self._calcNetworkDelay(2000, 100, ip_endpoint['packet_size'], ip_endpoint['video_bit_rate']) + + e2e_delay = network_request_delay + (avg_response_time / 1000) + network_response_delay + agent_db_client.write_points(lp.generate_ipendpoint_route('http://localhost/server-status?auto', ip_endpoint['request_arrival_rate'], e2e_delay, sim_time)) + + # remove requests processed off the queue + ip_endpoint['request_queue'] -= int(requests_processed) + + sim_time += TICK_TIME + end_time = sim_time + print("Simulation Finished. Start time {0}. End time {1}. Total time {2}".format(start_time, end_time, + end_time - start_time)) + + @staticmethod + def _calcNetworkDelay(distance, bandwidth, packet_size, tx_video_bit_rate): + """ + Calculates the network delay. Declared as static method since it doesn't need access to any instance variables. + + :param distance: distance metres + :param bandwidth: bandwidth Mbps + :param packet_size: packet size bytes + :param tx_video_bit_rate: bp/sec + :return: the calculated network delay + """ + + # propogation delay = distance/speed () (e.g 2000 metres * 2*10^8 for optical fibre) + propogation_delay = distance / (2 * 100000000) + # packetisation delay = ip packet size (bits)/tx rate (e.g. 100Mbp with 0% packet loss) + packetisation_delay = (packet_size * 8) / (bandwidth * 1000000) + # total number of packets to be sent + packets = (tx_video_bit_rate * 1000000) / (packet_size * 8) + + response_delay = packets * (propogation_delay + packetisation_delay) + + return response_delay + + @staticmethod + def _changeVMState(agent_db_client, sim_time, ip_endpoint, mu, sigma, transition_state, next_state): + """ + Send influx data to change VM state. 
Declared as static method since it doesn't need access to any instance variables. + :param sim_time: + :param ip_endpoint: + :param mu: + :param sigma: + :param transition_state: + :param next_state: + :return: the delay time + """ + + agent_db_client.write_points(lp.generate_vm_config(transition_state, ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], sim_time)) + + delay_time = random.normalvariate(mu, sigma) + + agent_db_client.write_points(lp.generate_vm_config(next_state, ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], sim_time + delay_time)) + + return delay_time + + +@pytest.fixture(scope='module') +def run_simulation_fixture(streaming_sim_config): + """ + A fixture, which checks if the the DB has been created, if not it runs the simulator with a 10 seconds timeout after that + """ + + influx_db_url = "http://" + streaming_sim_config['hosts'][0]['ip_address'] + ":8086" + agent1_url = "http://" + streaming_sim_config['hosts'][1]['ip_address'] + ":8186" + agent2_url = "http://" + streaming_sim_config['hosts'][2]['ip_address'] + ":8186" + + global INFLUX_DB_URL + global INFLUX_DB_NAME + global SIMULATION_TIME_SEC + global AGENT1_URL + global AGENT2_URL + + simulator = Sim(influx_db_url, INFLUX_DB_NAME, agent1_url, agent2_url) + dbs = simulator.db_client.get_list_database() + dbs = [db.get("name") for db in dbs] + + # This check needed to be disabled as the CLMCMetrics database is always created when + # the test starts, irrespective of whether this is the 1st time or not +# if INFLUX_DB_NAME not in dbs: + simulator.reset() + simulator.run(SIMULATION_TIME_SEC) + + print("10 seconds timeout is given so that the data could properly be inserted into the database.") + import time + time.sleep(10) + + +def run_simulation(generate=True): + """ + A method which runs the data generation simulator + :param generate: True for generating data, False for deleting the DB (optional argument, if not given, default value True is used) + """ + + 
global INFLUX_DB_NAME + global INFLUX_DB_URL + global SIMULATION_TIME_SEC + global AGENT1_URL + global AGENT2_URL + + simulator = Sim(INFLUX_DB_URL, INFLUX_DB_NAME, AGENT1_URL, AGENT2_URL) + + if generate: + simulator.reset() + simulator.run(SIMULATION_TIME_SEC) + else: + simulator.db_client.drop_database(simulator.influx_db_name) + + +if __name__ == "__main__": + """ + The main entry for this module. Code here is executed only if the StreamingSim.py file is executed, + but not when it's imported in another module + """ + + # check if there are any command line arguments given when executing the module + if len(sys.argv) > 1: + # if CLI argument '-c' is set when executing the script, the influx db will be deleted instead of generating data + option = str(sys.argv[1]) != "-c" + run_simulation(generate=option) + else: + # no argument is given to the function call, hence the default value True is used + run_simulation() diff --git a/clmctest/monitoring/__init__.py b/clmctest/monitoring/__init__.py new file mode 100644 index 0000000..44f7725 --- /dev/null +++ b/clmctest/monitoring/__init__.py @@ -0,0 +1 @@ +#!/usr/bin/python3 \ No newline at end of file diff --git a/clmctest/monitoring/conftest.py b/clmctest/monitoring/conftest.py new file mode 100644 index 0000000..828b68c --- /dev/null +++ b/clmctest/monitoring/conftest.py @@ -0,0 +1,36 @@ +#!/usr/bin/python3 + +import pytest +import yaml +import pkg_resources +from influxdb import InfluxDBClient + + +@pytest.fixture(scope="module") +def streaming_sim_config(): + """ + Reads the service configuration deployed for the streaming simulation test. 
+ + :param request: access the parameters of the fixture + :return: the python object representing the read YAML file + """ + rspec = pkg_resources.resource_filename('clmctest.monitoring', 'rspec.yml') + print("rspec file: {0}".format(rspec)) + + with open(rspec, 'r') as stream: + data_loaded = yaml.load(stream) + return data_loaded + + +@pytest.fixture(params=[{'database': 'CLMCMetrics'}], scope='module') +def get_db_client(streaming_sim_config, request): + """ + Creates an Influx DB client for the CLMC metrics database + + :param streaming_sim_config: the fixture returning the yaml configuration + :param request: access the parameters of the fixture + :return: the created Influx DB client + """ + + return InfluxDBClient(host=streaming_sim_config['hosts'][0]['ip_address'], port=8086, database=request.param['database'], timeout=10) + diff --git a/clmctest/monitoring/rspec.yml b/clmctest/monitoring/rspec.yml new file mode 100644 index 0000000..d59e149 --- /dev/null +++ b/clmctest/monitoring/rspec.yml @@ -0,0 +1,52 @@ +hosts: + - name: clmc-service + cpus: 1 + memory: 2048 + disk: "10GB" + forward_ports: + - guest: 8086 + host: 8086 + - guest: 8888 + host: 8888 + - guest: 9092 + host: 9092 + ip_address: "203.0.113.100" + - name: ipendpoint1 + cpus: 1 + memory: 2048 + disk: "10GB" + service_name: "ipendpoint" + forward_ports: + - guest: 80 + host: 8081 + ip_address: "203.0.113.101" + location: "DC1" + sfc_id: "MS_Template_1" + sfc_id_instance: "MS_I1" + sf_id: "adaptive_streaming" + sf_id_instance: "adaptive_streaming_I1" + ipendpoint_id: "adaptive_streaming_I1_apache1" + influxdb_url: "http://203.0.113.100:8086" + database_name: "CLMCMetrics" + - name: ipendpoint2 + cpus: 1 + memory: 2048 + disk: "10GB" + service_name: "ipendpoint" + forward_ports: + - guest: 80 + host: 8082 + ip_address: "203.0.113.102" + location: "DC2" + sfc_id: "MS_Template_1" + sfc_id_instance: "MS_I1" + sf_id: "adaptive_streaming" + sf_id_instance: "adaptive_streaming_I1" + ipendpoint_id: 
"adaptive_streaming_I1_apache2" + influxdb_url: "http://203.0.113.100:8086" + database_name: "CLMCMetrics" + - name: test-runner + cpus: 1 + memory: 2048 + disk: "10GB" + ip_address: "203.0.113.103" \ No newline at end of file diff --git a/clmctest/monitoring/test_rspec.py b/clmctest/monitoring/test_rspec.py new file mode 100644 index 0000000..ecce587 --- /dev/null +++ b/clmctest/monitoring/test_rspec.py @@ -0,0 +1,40 @@ +#!/usr/bin/python3 + +from subprocess import run +from platform import system +import pytest + + +@pytest.mark.parametrize("service_name", [ + 'clmc-service', + 'ipendpoint1', + 'ipendpoint2' +]) +def test_service_names(streaming_sim_config, service_name): + """ + Tests the service names in the configuration. + + :param streaming_sim_config: the configuration fixture collected from conftest.py + :param service_name the service name to test + """ + + assert any(s['name'] == service_name for s in streaming_sim_config['hosts']), "{0} not in list of hosts".format(service_name) + print("\nSuccessfully passed configuration test for service name {0}\n".format(service_name)) + + +def test_ping(streaming_sim_config): + """ + Pings each service to test for liveliness + + :param streaming_sim_config: the configuration fixture collected from conftest.py + """ + + print("\n") # blank line printed for formatting purposes + + ping_count = 1 + system_dependent_param = "-n" if system().lower() == "windows" else "-c" + + for service in streaming_sim_config['hosts']: + command = ["ping", system_dependent_param, str(ping_count), service['ip_address']] + assert run(command).returncode == 0, "Service ping test failed for {0} with ip address {1}".format(service['name'], service['ip_address']) + print("\nSuccessfully passed ping test for service: {0}\n".format(service['name'])) diff --git a/clmctest/monitoring/test_simresults.py b/clmctest/monitoring/test_simresults.py new file mode 100644 index 0000000..7940fc3 --- /dev/null +++ b/clmctest/monitoring/test_simresults.py 
@@ -0,0 +1,49 @@ +#!/usr/bin/python3 + +import pytest +from clmctest.monitoring.StreamingSim import run_simulation_fixture + + +class TestSimulation(object): + """ + A testing class used to group all the tests related to the simulation data + """ + + @pytest.mark.parametrize("query, expected_result", [ + ('SELECT count(*) FROM "CLMCMetrics"."autogen"."cpu_usage"', + {"time": "1970-01-01T00:00:00Z", "count_cpu_active_time": 7200, "count_cpu_idle_time": 7200, "count_cpu_usage": 7200}), + ('SELECT count(*) FROM "CLMCMetrics"."autogen"."ipendpoint_route"', + {"time": "1970-01-01T00:00:00Z", "count_http_requests_fqdn_m": 7200, "count_network_fqdn_latency": 7200}), + ('SELECT count(*) FROM "CLMCMetrics"."autogen"."mpegdash_service"', + {"time": "1970-01-01T00:00:00Z", "count_avg_response_time": 7200, "count_peak_response_time": 7200, "count_requests": 7200}), + ('SELECT count(*) FROM "CLMCMetrics"."autogen"."net_port_io"', + {"time": "1970-01-01T00:00:00Z", "count_RX_BYTES_PORT_M": 7200, "count_TX_BYTES_PORT_M": 7200}), + ('SELECT count(*) FROM "CLMCMetrics"."autogen"."vm_res_alloc"', + {"time": "1970-01-01T00:00:00Z", "count_cpu": 12, "count_memory": 12, "count_storage": 12}) + ]) + def test_simulation(self, query, expected_result, get_db_client, run_simulation_fixture): + """ + This is the entry point of the test. 
This method will be found and executed when the module is ran using pytest + + :param query: the query to execute (value obtained from the pytest parameter decorator) + :param expected_result: the result expected from executing the query (value obtained from the pytest parameter decorator) + :param get_db_client the import db client fixture - imported from contest.py + :param run_simulation_fixture: the imported fixture to use to generate the testing data - the return value of the fixture is not needed in this case + """ + + # pytest automatically goes through all queries under test, declared in the parameters decorator + + print("\n") # prints a blank line for formatting purposes + + # the raise_errors=False argument is given so that we could actually test that the DB didn't return any errors instead of raising an exception + query_result = get_db_client.query(query, raise_errors=False) + + # test the error attribute of the result is None, that is no error is returned from executing the DB query + assert query_result.error is None, "An error was encountered while executing query {0}.".format(query) + + # get the dictionary of result points; the next() function just gets the first element of the query results iterator (we only expect one item in the iterator) + actual_result = next(query_result.get_points()) + + assert expected_result == actual_result, "Simulation test failure" + + print("Successfully passed test for the following query: {0}".format(query)) diff --git a/clmctest/scripts/.pytest_cache/v/cache/lastfailed b/clmctest/scripts/.pytest_cache/v/cache/lastfailed new file mode 100644 index 0000000..a120e0d --- /dev/null +++ b/clmctest/scripts/.pytest_cache/v/cache/lastfailed @@ -0,0 +1,3 @@ +{ + "test_config_telegraf.py::test_write_telegraf_conf": true +} \ No newline at end of file diff --git a/clmctest/scripts/__init__.py b/clmctest/scripts/__init__.py new file mode 100644 index 0000000..44f7725 --- /dev/null +++ b/clmctest/scripts/__init__.py @@ -0,0 +1 
@@ +#!/usr/bin/python3 \ No newline at end of file diff --git a/clmctest/scripts/rspec.yml b/clmctest/scripts/rspec.yml new file mode 100644 index 0000000..f69ab5d --- /dev/null +++ b/clmctest/scripts/rspec.yml @@ -0,0 +1,7 @@ +hosts: + - name: test-runner + cpus: 1 + memory: 2048 + disk: "10GB" + ip_address: "192.168.50.10" + \ No newline at end of file diff --git a/clmctest/scripts/test_config_telegraf.py b/clmctest/scripts/test_config_telegraf.py new file mode 100644 index 0000000..2ae71b8 --- /dev/null +++ b/clmctest/scripts/test_config_telegraf.py @@ -0,0 +1,88 @@ +#!/usr/bin/python3 + +import pytest +import subprocess + +def test_write_telegraf_conf(): + + # test telegraf monitoring configuration + TELEGRAF_CONF_DIR="/etc/telegraf" + LOCATION="DC1" + SFC_ID="media_service_A" + SFC_ID_INSTANCE="media_service_A_instance" + SF_ID="streaming_service" + SF_ID_INSTANCE="streaming_service_instance" + IP_ENDPOINT_ID="endpoint" + INFLUXDB_URL="http://172.29.236.10" + DATABASE_NAME="experimentation_database" + + try: + # mk telegraf conf directory + + (out, err, code) = run_command('sudo mkdir -p /etc/telegraf') + assert code == 0, "Failed to create telegraf conf dir : " + str(code) + ", cmd=" + cmd + + (out, err, code) = run_command('sudo mkdir -p /etc/telegraf/telegraf.d') + assert code == 0, "Failed to create telegraf include dir : " + str(code) + ", cmd=" + cmd + + # run write config template script with no telegraf.d directory + (out, err, code) = run_command('sudo cp /vagrant/scripts/clmc-agent/telegraf.conf /etc/telegraf/') + assert code == 0, "Failed to copy telegraf.conf : " + str(code) + ", cmd=" + cmd + + cmd = 'sudo cp /vagrant/scripts/clmc-agent/telegraf_output.conf /etc/telegraf/telegraf.d/' + (out, err, code) = run_command(cmd) + assert code == 0, "Failed to copy telegraf_output.conf : " + str(code) + ", cmd=" + cmd + + # run template relacement script with incorrect arguments + cmd = 'sudo /vagrant/scripts/clmc-agent/configure.sh' + (out, err, code) = 
run_command(cmd) + assert code == 1, "Failed to return error on incorrect arguments : " + str(code) + ", cmd=" + cmd + + # run template relacement script with all arguments + cmd = 'sudo /vagrant/scripts/clmc-agent/configure.sh ' + LOCATION + ' ' + SFC_ID + ' ' + SFC_ID_INSTANCE + ' ' + SF_ID + ' ' + SF_ID_INSTANCE + ' ' + IP_ENDPOINT_ID + ' ' + INFLUXDB_URL + ' ' + DATABASE_NAME + (out, err, code) = run_command(cmd) + assert code == 0, "Configure command returned error, output=" + str(out) + ", cmd=" + cmd + + # check that replacement was correct in telegraf.conf + try: + TELEGRAF_GENERAL_CONF_FILE = TELEGRAF_CONF_DIR + "/telegraf.conf" + with open(TELEGRAF_GENERAL_CONF_FILE) as general_conf: + lines = general_conf.read() + assert lines.find(LOCATION), "Cannot find location" + assert lines.find(SFC_ID), "Cannot find sfc_id" + assert lines.find(SFC_ID_INSTANCE), "Cannot find sfc_id_instance" + assert lines.find(SF_ID), "Cannot find sfc_id" + assert lines.find(SF_ID_INSTANCE), "Cannot find sf_id_instance" + assert lines.find(IP_ENDPOINT_ID), "Cannot find location" + except FileNotFoundError: + assert False, "Telegraf general conf file not found, " + TELEGRAF_GENERAL_CONF_FILE + + # check that replacement was correct in telegraf_output.conf + try: + TELEGRAF_OUTPUT_CONF_FILE = TELEGRAF_CONF_DIR + "/telegraf.d/telegraf_output.conf" + with open(TELEGRAF_OUTPUT_CONF_FILE) as output_conf: + lines = output_conf.read() + assert lines.find(INFLUXDB_URL), "Cannot find influx_db" + assert lines.find(DATABASE_NAME), "Cannot find database" + except FileNotFoundError: + assert False, "Telegraf output conf file not found, " + TELEGRAF_OUTPUT_CONF_FILE + + finally: + # clean up telegraf after test +# run_command("sudo rm -rf /etc/telegraf") + print("final") +# wrapper for executing commands on the cli, returning (std_out, std_error, process_return_code) +def run_command(cmd): + """Run a shell command. 
+ + Arguments: + cmd {string} -- command to run in the shell + + Returns: + stdout, stderr, exit code -- tuple of the process's stdout, stderr and exit code (0 on success) + """ + proc = subprocess.Popen([cmd], stdout=subprocess.PIPE, shell=True) + out, err = proc.communicate() + return_code = proc.returncode + return out, err, return_code + diff --git a/clmctest/services/apache/install.sh b/clmctest/services/apache/install.sh new file mode 100644 index 0000000..fc93bf5 --- /dev/null +++ b/clmctest/services/apache/install.sh @@ -0,0 +1,33 @@ +#!/bin/bash +#///////////////////////////////////////////////////////////////////////// +#// +#// (c) University of Southampton IT Innovation Centre, 2017 +#// +#// Copyright in this software belongs to University of Southampton +#// IT Innovation Centre of Gamma House, Enterprise Road, +#// Chilworth Science Park, Southampton, SO16 7NS, UK. +#// +#// This software may not be used, sold, licensed, transferred, copied +#// or reproduced in whole or in part in any manner or form or in or +#// on any media by any person other than in accordance with the terms +#// of the Licence Agreement supplied with the software, or otherwise +#// without the prior written consent of the copyright owners. +#// +#// This software is distributed WITHOUT ANY WARRANTY, without even the +#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +#// PURPOSE, except where stated in the Licence Agreement supplied with +#// the software. 
+#// +#// Created By : Michael Boniface +#// Created Date : 23/01/2018 +#// Created for Project : FLAME +#// +#///////////////////////////////////////////////////////////////////////// + +# Install apache +sudo apt-get update +sudo apt-get -y install apache2 + +# start apache +apachectl -k start +apachectl -k restart diff --git a/clmctest/services/apache/telegraf_apache.conf b/clmctest/services/apache/telegraf_apache.conf new file mode 100644 index 0000000..dd61410 --- /dev/null +++ b/clmctest/services/apache/telegraf_apache.conf @@ -0,0 +1,19 @@ +[[inputs.apache]] + ## An array of URLs to gather from, must be directed at the machine + ## readable version of the mod_status page including the auto query string. + ## Default is "http://localhost/server-status?auto". + urls = ["http://localhost:8890/server-status?auto"] + + ## Credentials for basic HTTP authentication. + # username = "myuser" + # password = "mypassword" + + ## Maximum time to receive response. + # response_timeout = "5s" + + ## Optional SSL Config + # ssl_ca = "/etc/telegraf/ca.pem" + # ssl_cert = "/etc/telegraf/cert.pem" + # ssl_key = "/etc/telegraf/key.pem" + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false \ No newline at end of file diff --git a/clmctest/services/ffmpeg/install.sh b/clmctest/services/ffmpeg/install.sh new file mode 100644 index 0000000..02d6e1e --- /dev/null +++ b/clmctest/services/ffmpeg/install.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +apt-get update +apt-get -y install zip python + +wget http://zebulon.bok.net/Bento4/binaries/Bento4-SDK-1-5-1-621.x86_64-unknown-linux.zip +unzip Bento4-SDK-1-5-1-621.x86_64-unknown-linux.zip + +mv Bento4-SDK-1-5-1-621.x86_64-unknown-linux /opt/ +rm Bento4-SDK-1-5-1-621.x86_64-unknown-linux.zip + +add-apt-repository -y ppa:jonathonf/ffmpeg-3 +apt-get update && apt -y install ffmpeg libav-tools x264 x265 + +ffmpeg -version diff --git a/clmctest/services/ffmpeg/telegraf_ffmpeg.conf 
b/clmctest/services/ffmpeg/telegraf_ffmpeg.conf new file mode 100644 index 0000000..efe72dc --- /dev/null +++ b/clmctest/services/ffmpeg/telegraf_ffmpeg.conf @@ -0,0 +1,15 @@ +# # Influx HTTP write listener +[[inputs.http_listener]] + ## Address and port to host HTTP listener on + service_address = ":8186" + + ## timeouts + read_timeout = "10s" + write_timeout = "10s" + + ## HTTPS + #tls_cert= "/etc/telegraf/cert.pem" + #tls_key = "/etc/telegraf/key.pem" + + ## MTLS + #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] \ No newline at end of file diff --git a/clmctest/services/ffmpeg/transcode.sh b/clmctest/services/ffmpeg/transcode.sh new file mode 100644 index 0000000..877472b --- /dev/null +++ b/clmctest/services/ffmpeg/transcode.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# +# Bento4 must be manually obtained from https://www.bento4.com/downloads/ ffmpeg can be installed as follows +# (Debian - http://www.deb-multimedia.org/) ~$ +# sudo echo deb http://www.deb-multimedia.org jessie main non-free >> /etc/apt/sources.list +# ~$ sudo apt update ~$ sudo apt install deb-multimedia-keyring ~$ sudo apt update && sudo apt-get dist-upgrade +# +# First encode the video to 24fps!!! 
and MP4 (h.264) +# +# Video MP4 file +INPUT=$1 +OUTPUT_iFRAMES="$1-iFrames.mp4" +OUTPUT_FRAGMENTED="$OUTPUT_iFRAMES-Fragmented.mp4" + +OUTPUT_FOLDER_NAME=$(date +%Y%m%d%H%M%S)$1 +echo "OUTPUT_FOLDER_NAME: "$OUTPUT_FOLDER_NAME + +rm -rf $OUTPUT_FOLDER_NAME +mkdir $OUTPUT_FOLDER_NAME + +# Insert Correct number of I frames +#ffmpeg -y -i $INPUT -profile:v baseline -level 3.0 -c:a libfdk_aac -ac 2 -ab 128k -c:v libx264 -x264opts 'keyint=24:min-keyint=24:no-scenecut' -b:v 400k -maxrate 400k -bufsize 1000k -vf "scale=-1:360" $OUTPUT_iFRAMES + +ffmpeg -y -i $INPUT -profile:v baseline -level 3.0 -c:a aac -ac 2 -ab 128k -c:v libx264 -x264opts 'keyint=24:min-keyint=24:no-scenecut' -b:v 400k -maxrate 400k -bufsize 1000k -vf "scale=-1:360" -strict experimental $OUTPUT_FOLDER_NAME"/"$OUTPUT_iFRAMES + +# fragment MP4 +/opt/Bento4-SDK-1-5-1-621.x86_64-unknown-linux/bin/mp4fragment --timescale 1000 $OUTPUT_FOLDER_NAME"/"$OUTPUT_iFRAMES $OUTPUT_FOLDER_NAME"/"$OUTPUT_FRAGMENTED + +# Option 1 with Bento4 +/opt/Bento4-SDK-1-5-1-621.x86_64-unknown-linux/bin/mp4dash --mpd-name=stream.mpd --use-segment-list --use-compat-namespace -o $OUTPUT_FOLDER_NAME"/"$OUTPUT_FOLDER_NAME $OUTPUT_FOLDER_NAME"/"$OUTPUT_FRAGMENTED + +cd $OUTPUT_FOLDER_NAME +tar -cvzf $OUTPUT_FOLDER_NAME".gz" $OUTPUT_FOLDER_NAME + diff --git a/clmctest/services/host/install.sh b/clmctest/services/host/install.sh new file mode 100644 index 0000000..83cc525 --- /dev/null +++ b/clmctest/services/host/install.sh @@ -0,0 +1,28 @@ +#!/bin/bash +#///////////////////////////////////////////////////////////////////////// +#// +#// (c) University of Southampton IT Innovation Centre, 2017 +#// +#// Copyright in this software belongs to University of Southampton +#// IT Innovation Centre of Gamma House, Enterprise Road, +#// Chilworth Science Park, Southampton, SO16 7NS, UK. 
+#// +#// This software may not be used, sold, licensed, transferred, copied +#// or reproduced in whole or in part in any manner or form or in or +#// on any media by any person other than in accordance with the terms +#// of the Licence Agreement supplied with the software, or otherwise +#// without the prior written consent of the copyright owners. +#// +#// This software is distributed WITHOUT ANY WARRANTY, without even the +#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +#// PURPOSE, except where stated in the Licence Agreement supplied with +#// the software. +#// +#// Created By : Michael Boniface +#// Created Date : 23/01/2018 +#// Created for Project : FLAME +#// +#///////////////////////////////////////////////////////////////////////// + +# Install host +# This is a dummy script as the endpoint is driven by simulation \ No newline at end of file diff --git a/clmctest/services/host/telegraf_host.conf b/clmctest/services/host/telegraf_host.conf new file mode 100644 index 0000000..1fdd33a --- /dev/null +++ b/clmctest/services/host/telegraf_host.conf @@ -0,0 +1,80 @@ + +############################################################################### +# INPUTS # +############################################################################### +# # Read metrics about network interface usage + [[inputs.net]] +# ## By default, telegraf gathers stats from any up interface (excluding loopback) +# ## Setting interfaces will tell it to gather these explicit interfaces, +# ## regardless of status. +# ## +# # interfaces = ["eth0"] + +# Read metrics about cpu usage +[[inputs.cpu]] + ## Whether to report per-cpu stats or not + percpu = true + ## Whether to report total system cpu stats or not + totalcpu = true + ## If true, collect raw CPU time metrics. + collect_cpu_time = false + ## If true, compute and report the sum of all non-idle CPU states. 
+ #report_active = false + + +# Read metrics about disk usage by mount point +[[inputs.disk]] + ## By default, telegraf gather stats for all mountpoints. + ## Setting mountpoints will restrict the stats to the specified mountpoints. + # mount_points = ["/"] + + ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually + ## present on /run, /var/run, /dev/shm or /dev). + ignore_fs = ["tmpfs", "devtmpfs", "devfs"] + + +# Read metrics about disk IO by device +[[inputs.diskio]] + ## By default, telegraf will gather stats for all devices including + ## disk partitions. + ## Setting devices will restrict the stats to the specified devices. + # devices = ["sda", "sdb"] + ## Uncomment the following line if you need disk serial numbers. + # skip_serial_number = false + # + ## On systems which support it, device metadata can be added in the form of + ## tags. + ## Currently only Linux is supported via udev properties. You can view + ## available properties for a device by running: + ## 'udevadm info -q property -n /dev/sda' + # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] + # + ## Using the same metadata source as device_tags, you can also customize the + ## name of the device via templates. + ## The 'name_templates' parameter is a list of templates to try and apply to + ## the device. The template may contain variables in the form of '$PROPERTY' or + ## '${PROPERTY}'. The first template which does not contain any variables not + ## present for the device is used as the device name tag. + ## The typical use case is for LVM volumes, to get the VG/LV name instead of + ## the near-meaningless DM-0 name. 
+ # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] + +# Read metrics about memory usage +[[inputs.mem]] + # no configuration + +# # Influx HTTP write listener +[[inputs.http_listener]] + ## Address and port to host HTTP listener on + service_address = ":8186" + + ## timeouts + read_timeout = "10s" + write_timeout = "10s" + + ## HTTPS + #tls_cert= "/etc/telegraf/cert.pem" + #tls_key = "/etc/telegraf/key.pem" + + ## MTLS + #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] \ No newline at end of file diff --git a/clmctest/services/ipendpoint/install.sh b/clmctest/services/ipendpoint/install.sh new file mode 100644 index 0000000..7cc5c34 --- /dev/null +++ b/clmctest/services/ipendpoint/install.sh @@ -0,0 +1,28 @@ +#!/bin/bash +#///////////////////////////////////////////////////////////////////////// +#// +#// (c) University of Southampton IT Innovation Centre, 2017 +#// +#// Copyright in this software belongs to University of Southampton +#// IT Innovation Centre of Gamma House, Enterprise Road, +#// Chilworth Science Park, Southampton, SO16 7NS, UK. +#// +#// This software may not be used, sold, licensed, transferred, copied +#// or reproduced in whole or in part in any manner or form or in or +#// on any media by any person other than in accordance with the terms +#// of the Licence Agreement supplied with the software, or otherwise +#// without the prior written consent of the copyright owners. +#// +#// This software is distributed WITHOUT ANY WARRANTY, without even the +#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +#// PURPOSE, except where stated in the Licence Agreement supplied with +#// the software. 
+#// +#// Created By : Michael Boniface +#// Created Date : 23/01/2018 +#// Created for Project : FLAME +#// +#///////////////////////////////////////////////////////////////////////// + +# Install ipendpoint +# This is a dummy script as the endpoint is driven by simulation \ No newline at end of file diff --git a/clmctest/services/ipendpoint/telegraf_ipendpoint.conf b/clmctest/services/ipendpoint/telegraf_ipendpoint.conf new file mode 100644 index 0000000..efe72dc --- /dev/null +++ b/clmctest/services/ipendpoint/telegraf_ipendpoint.conf @@ -0,0 +1,15 @@ +# # Influx HTTP write listener +[[inputs.http_listener]] + ## Address and port to host HTTP listener on + service_address = ":8186" + + ## timeouts + read_timeout = "10s" + write_timeout = "10s" + + ## HTTPS + #tls_cert= "/etc/telegraf/cert.pem" + #tls_key = "/etc/telegraf/key.pem" + + ## MTLS + #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] \ No newline at end of file diff --git a/clmctest/services/loadtest-streaming/install.sh b/clmctest/services/loadtest-streaming/install.sh new file mode 100644 index 0000000..7d6ef6d --- /dev/null +++ b/clmctest/services/loadtest-streaming/install.sh @@ -0,0 +1,30 @@ +#!/bin/bash +#///////////////////////////////////////////////////////////////////////// +#// +#// (c) University of Southampton IT Innovation Centre, 2017 +#// +#// Copyright in this software belongs to University of Southampton +#// IT Innovation Centre of Gamma House, Enterprise Road, +#// Chilworth Science Park, Southampton, SO16 7NS, UK. +#// +#// This software may not be used, sold, licensed, transferred, copied +#// or reproduced in whole or in part in any manner or form or in or +#// on any media by any person other than in accordance with the terms +#// of the Licence Agreement supplied with the software, or otherwise +#// without the prior written consent of the copyright owners. 
+#// +#// This software is distributed WITHOUT ANY WARRANTY, without even the +#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +#// PURPOSE, except where stated in the Licence Agreement supplied with +#// the software. +#// +#// Created By : Michael Boniface +#// Created Date : 14/02/2018 +#// Created for Project : FLAME +#// +#///////////////////////////////////////////////////////////////////////// +set -euo pipefail + +echo "REPO_ROOT:"$REPO_ROOT +eval '$REPO_ROOT/test/services/vlc/install.sh' +eval '$REPO_ROOT/test/services/pytest/install.sh' \ No newline at end of file diff --git a/clmctest/services/loadtest-streaming/telegraf_loadtest_streaming.conf b/clmctest/services/loadtest-streaming/telegraf_loadtest_streaming.conf new file mode 100644 index 0000000..3e30465 --- /dev/null +++ b/clmctest/services/loadtest-streaming/telegraf_loadtest_streaming.conf @@ -0,0 +1,112 @@ +# Telegraf configuration + +# Telegraf is entirely plugin driven. All metrics are gathered from the +# declared inputs, and sent to the declared outputs. + +# Plugins must be declared in here to be active. +# To deactivate a plugin, comment out the name and any variables. + +# Use 'telegraf -config telegraf.conf -test' to see what metrics a config +# file would generate. + +# Global tags can be specified here in key="value" format. +[global_tags] + # location of the data centre + location="{{LOCATION}}" + # media service template id + sfc="{{SFC_ID}}" + # media service instance + sfc_i="{{SFC_ID_INSTANCE}}" + # service function type + sf="{{SF_ID}}" + # service function instance id + sf_i="{{SF_ID_INSTANCE}}" + # ipendpoint id aka surrogate instance + ipendpoint="{{IP_ENDPOINT_ID}}" + +# Configuration for telegraf agent +[agent] + ## Default data collection interval for all inputs + interval = "10s" + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. 
+ round_interval = true + + ## Telegraf will cache metric_buffer_limit metrics for each output, and will + ## flush this buffer on a successful write. + metric_buffer_limit = 1000 + ## Flush the buffer whenever full, regardless of flush_interval. + flush_buffer_when_full = true + + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. + collection_jitter = "0s" + + ## Default flushing interval for all outputs. You shouldn't set this below + ## interval. Maximum flush_interval will be flush_interval + flush_jitter + flush_interval = "10s" + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. + ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + flush_jitter = "0s" + + ## Logging configuration: + ## Run telegraf in debug mode + debug = false + ## Run telegraf in quiet mode + quiet = false + ## Specify the log file name. The empty string means to log to stdout. + logfile = "/var/log/telegraf/telegraf.log" + + ## Override default hostname, if empty use os.Hostname() + hostname = "" + + +############################################################################### +# OUTPUTS # +############################################################################### + +# Configuration for influxdb server to send metrics to +[[outputs.influxdb]] + # The full HTTP or UDP endpoint URL for your InfluxDB instance. + # Multiple urls can be specified but it is assumed that they are part of the same + # cluster, this means that only ONE of the urls will be written to each interval. 
+ # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example + urls = ["{{INFLUXDB_URL}}"] # required + # The target database for metrics (telegraf will create it if not exists) + database = "{{DATABASE_NAME}}" # required + # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h". + # note: using second precision greatly helps InfluxDB compression + precision = "s" + + ## Write timeout (for the InfluxDB client), formatted as a string. + ## If not provided, will default to 5s. 0s means no timeout (not recommended). + timeout = "5s" + # username = "telegraf" + # password = "metricsmetricsmetricsmetrics" + # Set the user agent for HTTP POSTs (can be useful for log differentiation) + # user_agent = "telegraf" + # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes) + # udp_payload = 512 + + +############################################################################### +# INPUTS # +############################################################################### +# # Influx HTTP write listener +[[inputs.http_listener]] + ## Address and port to host HTTP listener on + service_address = ":8186" + + ## timeouts + read_timeout = "10s" + write_timeout = "10s" + + ## HTTPS + #tls_cert= "/etc/telegraf/cert.pem" + #tls_key = "/etc/telegraf/key.pem" + + ## MTLS + #tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] \ No newline at end of file diff --git a/clmctest/services/mongo/install.sh b/clmctest/services/mongo/install.sh new file mode 100644 index 0000000..e00502f --- /dev/null +++ b/clmctest/services/mongo/install.sh @@ -0,0 +1,33 @@ +#!/bin/bash +#///////////////////////////////////////////////////////////////////////// +#// +#// (c) University of Southampton IT Innovation Centre, 2017 +#// +#// Copyright in this software belongs to University of Southampton +#// IT Innovation Centre of Gamma House, Enterprise Road, +#// Chilworth Science Park, Southampton, SO16 7NS, UK. 
+#// +#// This software may not be used, sold, licensed, transferred, copied +#// or reproduced in whole or in part in any manner or form or in or +#// on any media by any person other than in accordance with the terms +#// of the Licence Agreement supplied with the software, or otherwise +#// without the prior written consent of the copyright owners. +#// +#// This software is distributed WITHOUT ANY WARRANTY, without even the +#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +#// PURPOSE, except where stated in the Licence Agreement supplied with +#// the software. +#// +#// Created By : Michael Boniface +#// Created Date : 23/01/2018 +#// Created for Project : FLAME +#// +#///////////////////////////////////////////////////////////////////////// + +# Install apache +sudo apt-get update +sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 2930ADAE8CAF5059EE73BB4B58712A2291FA4AD5 +echo "deb [ arch=amd64,arm64 ] https://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.6 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.6.list +sudo apt-get update +sudo apt-get install -y mongodb-org +sudo service mongod start \ No newline at end of file diff --git a/clmctest/services/mongo/telegraf_mongo.conf b/clmctest/services/mongo/telegraf_mongo.conf new file mode 100644 index 0000000..80a6a69 --- /dev/null +++ b/clmctest/services/mongo/telegraf_mongo.conf @@ -0,0 +1,15 @@ +[[inputs.mongodb]] + ## An array of URLs of the form: + ## "mongodb://" [user ":" pass "@"] host [ ":" port] + ## For example: + ## mongodb://user:auth_key@10.10.3.30:27017, + ## mongodb://10.10.3.33:18832, + servers = ["mongodb://127.0.0.1:27017"] + gather_perdb_stats = false + + ## Optional SSL Config + # ssl_ca = "/etc/telegraf/ca.pem" + # ssl_cert = "/etc/telegraf/cert.pem" + # ssl_key = "/etc/telegraf/key.pem" + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false \ No newline at end of file diff --git 
a/clmctest/services/nginx/install.sh b/clmctest/services/nginx/install.sh new file mode 100644 index 0000000..d8baa46 --- /dev/null +++ b/clmctest/services/nginx/install.sh @@ -0,0 +1,52 @@ +#!/bin/bash +#///////////////////////////////////////////////////////////////////////// +#// +#// (c) University of Southampton IT Innovation Centre, 2017 +#// +#// Copyright in this software belongs to University of Southampton +#// IT Innovation Centre of Gamma House, Enterprise Road, +#// Chilworth Science Park, Southampton, SO16 7NS, UK. +#// +#// This software may not be used, sold, licensed, transferred, copied +#// or reproduced in whole or in part in any manner or form or in or +#// on any media by any person other than in accordance with the terms +#// of the Licence Agreement supplied with the software, or otherwise +#// without the prior written consent of the copyright owners. +#// +#// This software is distributed WITHOUT ANY WARRANTY, without even the +#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +#// PURPOSE, except where stated in the Licence Agreement supplied with +#// the software. +#// +#// Created By : Michael Boniface +#// Created Date : 01/02/2018 +#// Created for Project : FLAME +#// +#///////////////////////////////////////////////////////////////////////// + +# Install nginx +apt-get update +yes Y | apt-get install nginx + +# Need to set up basic stats as this not configured by default +# http://nginx.org/en/docs/http/ngx_http_stub_status_module.html + +NGINX_CONF_SOURCE=$REPO_ROOT"/clmctest/services/nginx/nginx.conf" +NGINX_CONF_TARGET="/etc/nginx/nginx.conf" + +# Check the target telegraf directory exists +if [ ! -f "$NGINX_CONF_SOURCE" ]; then + echo "Error: NGINX conf file does not exist on in the repo. "$NGINX_CONF_SOURCE + exit 1 +fi + +cp -rf $NGINX_CONF_SOURCE $NGINX_CONF_TARGET + +# Check the target telegraf directory exists +if [ ! -f "$NGINX_CONF_TARGET" ]; then + echo "Error: NGINX conf copy failed to target machine. 
"$NGINX_CONF_TARGET + exit 1 +fi + +nginx -s reload +systemctl start nginx \ No newline at end of file diff --git a/clmctest/services/nginx/nginx.conf b/clmctest/services/nginx/nginx.conf new file mode 100644 index 0000000..1c906be --- /dev/null +++ b/clmctest/services/nginx/nginx.conf @@ -0,0 +1,14 @@ + + +events { + worker_connections 4096; ## Default: 1024 +} +http { + server { + location /nginx_status { + stub_status on; + access_log off; + allow all; + } + } +} \ No newline at end of file diff --git a/clmctest/services/nginx/telegraf_nginx.conf b/clmctest/services/nginx/telegraf_nginx.conf new file mode 100644 index 0000000..c91cdeb --- /dev/null +++ b/clmctest/services/nginx/telegraf_nginx.conf @@ -0,0 +1,7 @@ +# Read Nginx's basic status information (ngx_http_stub_status_module) +[[inputs.nginx]] + ## An array of Nginx stub_status URI to gather stats. + urls = ["http://localhost:80/nginx_status"] + + ## HTTP response timeout (default: 5s) +# response_timeout = "5s" \ No newline at end of file diff --git a/clmctest/services/pytest/install.sh b/clmctest/services/pytest/install.sh new file mode 100644 index 0000000..ce998ad --- /dev/null +++ b/clmctest/services/pytest/install.sh @@ -0,0 +1,30 @@ +#!/bin/bash +#///////////////////////////////////////////////////////////////////////// +#// +#// (c) University of Southampton IT Innovation Centre, 2017 +#// +#// Copyright in this software belongs to University of Southampton +#// IT Innovation Centre of Gamma House, Enterprise Road, +#// Chilworth Science Park, Southampton, SO16 7NS, UK. +#// +#// This software may not be used, sold, licensed, transferred, copied +#// or reproduced in whole or in part in any manner or form or in or +#// on any media by any person other than in accordance with the terms +#// of the Licence Agreement supplied with the software, or otherwise +#// without the prior written consent of the copyright owners. 
+#// +#// This software is distributed WITHOUT ANY WARRANTY, without even the +#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +#// PURPOSE, except where stated in the Licence Agreement supplied with +#// the software. +#// +#// Created By : Michael Boniface +#// Created Date : 24/02/2018 +#// Created for Project : FLAME +#// +#///////////////////////////////////////////////////////////////////////// +apt-get update +apt-get -y install python3 python3-pip python-influxdb +update-alternatives --install /usr/bin/python python /usr/bin/python3 10 +pip3 install pytest pyyaml +pip3 install --upgrade influxdb diff --git a/clmctest/services/vlc/install.sh b/clmctest/services/vlc/install.sh new file mode 100644 index 0000000..02e3b8f --- /dev/null +++ b/clmctest/services/vlc/install.sh @@ -0,0 +1,29 @@ +#!/bin/bash +#///////////////////////////////////////////////////////////////////////// +#// +#// (c) University of Southampton IT Innovation Centre, 2017 +#// +#// Copyright in this software belongs to University of Southampton +#// IT Innovation Centre of Gamma House, Enterprise Road, +#// Chilworth Science Park, Southampton, SO16 7NS, UK. +#// +#// This software may not be used, sold, licensed, transferred, copied +#// or reproduced in whole or in part in any manner or form or in or +#// on any media by any person other than in accordance with the terms +#// of the Licence Agreement supplied with the software, or otherwise +#// without the prior written consent of the copyright owners. +#// +#// This software is distributed WITHOUT ANY WARRANTY, without even the +#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +#// PURPOSE, except where stated in the Licence Agreement supplied with +#// the software. 
+#// +#// Created By : Michael Boniface +#// Created Date : 12/02/2018 +#// Created for Project : FLAME +#// +#///////////////////////////////////////////////////////////////////////// + +add-apt-repository -y ppa:videolan/master-daily +apt-get update +apt-get -y install vlc \ No newline at end of file diff --git a/clmctest/streaming/__init__.py b/clmctest/streaming/__init__.py new file mode 100644 index 0000000..44f7725 --- /dev/null +++ b/clmctest/streaming/__init__.py @@ -0,0 +1 @@ +#!/usr/bin/python3 \ No newline at end of file diff --git a/clmctest/streaming/conftest.py b/clmctest/streaming/conftest.py new file mode 100644 index 0000000..8aa64e5 --- /dev/null +++ b/clmctest/streaming/conftest.py @@ -0,0 +1,20 @@ +#!/usr/bin/python3 + +import pytest +import yaml +import pkg_resources + +@pytest.fixture(scope="module") +def streaming_config(): + """ + Reads the service configuration deployed for the streaming simulation test. + + :param request: access the parameters of the fixture + :return: the python object representing the read YAML file + """ + rspec = pkg_resources.resource_filename('clmctest.streaming', 'rspec.yml') + print("rspec file: {0}".format(rspec)) + + with open(rspec, 'r') as stream: + data_loaded = yaml.load(stream) + return data_loaded diff --git a/clmctest/streaming/dashboard.json b/clmctest/streaming/dashboard.json new file mode 100644 index 0000000..52e7384 --- /dev/null +++ b/clmctest/streaming/dashboard.json @@ -0,0 +1 @@ +{"id":1,"cells":[{"i":"396b0b14-1482-4b8a-a359-f144541170a4","x":6,"y":8,"w":6,"h":4,"name":"AdaptiveStreaming_SF_NetworkBytesSentPerSecond","queries":[{"query":"SELECT derivative(mean(\"bytes_sent\"), 1s) AS \"bytes_sent_per_second\" FROM \"CLMCMetrics\".\"autogen\".\"net\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx2' GROUP BY 
:interval:","queryConfig":{"database":"","measurement":"","retentionPolicy":"","fields":[],"tags":{},"groupBy":{"time":"","tags":[]},"areTagsAccepted":false,"rawText":"SELECT derivative(mean(\"bytes_sent\"), 1s) AS \"bytes_sent_per_second\" FROM \"CLMCMetrics\".\"autogen\".\"net\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx2' GROUP BY :interval:","range":null}}],"type":"line","links":{"self":"/chronograf/v1/dashboards/1/cells/396b0b14-1482-4b8a-a359-f144541170a4"}},{"i":"480b4037-a816-4e1c-8c84-edb39b0c1f6d","x":0,"y":8,"w":6,"h":4,"name":"AdapativeStreaming_SF_NetworkBytesSentPerSecond","queries":[{"query":"SELECT derivative(mean(\"bytes_sent\"), 1s) AS \"bytes_sent_per_second\" FROM \"CLMCMetrics\".\"autogen\".\"net\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx1' GROUP BY :interval:","queryConfig":{"database":"","measurement":"","retentionPolicy":"","fields":[],"tags":{},"groupBy":{"time":"","tags":[]},"areTagsAccepted":false,"rawText":"SELECT derivative(mean(\"bytes_sent\"), 1s) AS \"bytes_sent_per_second\" FROM \"CLMCMetrics\".\"autogen\".\"net\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx1' GROUP BY :interval:","range":null}}],"type":"line","links":{"self":"/chronograf/v1/dashboards/1/cells/480b4037-a816-4e1c-8c84-edb39b0c1f6d"}},{"i":"6ad170aa-c5f2-4930-a604-1e88579dffee","x":6,"y":4,"w":6,"h":4,"name":"AdaptiveStreaming_SF2_CPU","queries":[{"query":"SELECT 100-mean(\"usage_idle\") AS \"mean_usage_idle\" FROM \"CLMCMetrics\".\"autogen\".\"cpu\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx2' GROUP BY :interval:","queryConfig":{"database":"","measurement":"","retentionPolicy":"","fields":[],"tags":{},"groupBy":{"time":"","tags":[]},"areTagsAccepted":false,"rawText":"SELECT 100-mean(\"usage_idle\") AS \"mean_usage_idle\" FROM \"CLMCMetrics\".\"autogen\".\"cpu\" WHERE time \u003e :dashboardTime: AND 
\"ipendpoint\"='adaptive_streaming_I1_nginx2' GROUP BY :interval:","range":null}}],"type":"line","links":{"self":"/chronograf/v1/dashboards/1/cells/6ad170aa-c5f2-4930-a604-1e88579dffee"}},{"i":"7e424259-32b8-40be-aa53-477aaf801f0e","x":0,"y":4,"w":6,"h":4,"name":"AdaptiveStreaming_SF1_CPU","queries":[{"query":"SELECT 100-mean(\"usage_idle\") AS \"mean_usage_idle\" FROM \"CLMCMetrics\".\"autogen\".\"cpu\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx1' GROUP BY :interval:","queryConfig":{"database":"","measurement":"","retentionPolicy":"","fields":[],"tags":{},"groupBy":{"time":"","tags":[]},"areTagsAccepted":false,"rawText":"SELECT 100-mean(\"usage_idle\") AS \"mean_usage_idle\" FROM \"CLMCMetrics\".\"autogen\".\"cpu\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx1' GROUP BY :interval:","range":null}}],"type":"line","links":{"self":"/chronograf/v1/dashboards/1/cells/7e424259-32b8-40be-aa53-477aaf801f0e"}},{"i":"a095c820-8bac-45fe-974d-4030e1bb8770","x":6,"y":0,"w":6,"h":4,"name":"AdaptiveStreaming_SF2_ActiveConnections","queries":[{"query":"SELECT mean(\"active\") AS \"mean_active\" FROM \"CLMCMetrics\".\"autogen\".\"nginx\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx2' GROUP BY :interval:","label":"nginx.active","queryConfig":{"database":"CLMCMetrics","measurement":"nginx","retentionPolicy":"autogen","fields":[{"field":"active","funcs":["mean"]}],"tags":{"ipendpoint":["adaptive_streaming_I1_nginx2"]},"groupBy":{"time":"auto","tags":[]},"areTagsAccepted":true,"rawText":null,"range":null}}],"type":"line","links":{"self":"/chronograf/v1/dashboards/1/cells/a095c820-8bac-45fe-974d-4030e1bb8770"}},{"i":"63a7e85a-b411-46be-9478-8479405379a3","x":0,"y":0,"w":6,"h":4,"name":"AdaptiveStreaming_SF1_ActiveConnections","queries":[{"query":"SELECT mean(\"active\") AS \"mean_active\" FROM \"CLMCMetrics\".\"autogen\".\"nginx\" WHERE time \u003e :dashboardTime: AND 
\"ipendpoint\"='adaptive_streaming_I1_nginx1' GROUP BY :interval:","label":"nginx.active","queryConfig":{"database":"CLMCMetrics","measurement":"nginx","retentionPolicy":"autogen","fields":[{"field":"active","funcs":["mean"]}],"tags":{"ipendpoint":["adaptive_streaming_I1_nginx1"]},"groupBy":{"time":"auto","tags":[]},"areTagsAccepted":true,"rawText":null,"range":null}}],"type":"line","links":{"self":"/chronograf/v1/dashboards/1/cells/63a7e85a-b411-46be-9478-8479405379a3"}}],"templates":[],"name":"Adaptive Streaming Experiment Dashboard","links":{"self":"/chronograf/v1/dashboards/1","cells":"/chronograf/v1/dashboards/1/cells","templates":"/chronograf/v1/dashboards/1/templates"}} diff --git a/clmctest/streaming/influx.json b/clmctest/streaming/influx.json new file mode 100644 index 0000000..34bb14a --- /dev/null +++ b/clmctest/streaming/influx.json @@ -0,0 +1,7 @@ +{ + "id": "1", + "name": "Influx 1", + "url": "http://localhost:8086", + "default": true, + "telegraf": "CLMCMetrics" +} \ No newline at end of file diff --git a/clmctest/streaming/kapacitor.conf b/clmctest/streaming/kapacitor.conf new file mode 100644 index 0000000..e8332d6 --- /dev/null +++ b/clmctest/streaming/kapacitor.conf @@ -0,0 +1,699 @@ +# The hostname of this node. +# Must be resolvable by any configured InfluxDB hosts. +hostname = "localhost" +# Directory for storing a small amount of metadata about the server. +data_dir = "/var/lib/kapacitor" + +# Do not apply configuration overrides during startup. +# Useful if the configuration overrides cause Kapacitor to fail startup. +# This option is intended as a safe guard and should not be needed in practice. 
+skip-config-overrides = false + +# Default retention-policy, if a write is made to Kapacitor and +# it does not have a retention policy associated with it, +# then the retention policy will be set to this value +default-retention-policy = "" + +[http] + # HTTP API Server for Kapacitor + # This server is always on, + # it serves both as a write endpoint + # and as the API endpoint for all other + # Kapacitor calls. + bind-address = ":9092" + log-enabled = true + write-tracing = false + pprof-enabled = false + https-enabled = false + https-certificate = "/etc/ssl/kapacitor.pem" + +[config-override] + # Enable/Disable the service for overridding configuration via the HTTP API. + enabled = true + +[logging] + # Destination for logs + # Can be a path to a file or 'STDOUT', 'STDERR'. + file = "/var/log/kapacitor/kapacitor.log" + # Logging level can be one of: + # DEBUG, INFO, ERROR + # HTTP logging can be disabled in the [http] config section. + level = "INFO" + +[load] + # Enable/Disable the service for loading tasks/templates/handlers + # from a directory + enabled = true + # Directory where task/template/handler files are set + dir = "/etc/kapacitor/load" + + +[replay] + # Where to store replay files, aka recordings. + dir = "/var/lib/kapacitor/replay" + +[task] + # Where to store the tasks database + # DEPRECATED: This option is not needed for new installations. + # It is only used to determine the location of the task.db file + # for migrating to the new `storage` service. + dir = "/var/lib/kapacitor/tasks" + # How often to snapshot running task state. + snapshot-interval = "60s" + +[storage] + # Where to store the Kapacitor boltdb database + boltdb = "/var/lib/kapacitor/kapacitor.db" + +[deadman] + # Configure a deadman's switch + # Globally configure deadman's switches on all tasks. + # NOTE: for this to be of use you must also globally configure at least one alerting method. 
+ global = false + # Threshold, if globally configured the alert will be triggered if the throughput in points/interval is <= threshold. + threshold = 0.0 + # Interval, if globally configured the frequency at which to check the throughput. + interval = "10s" + # Id -- the alert Id, NODE_NAME will be replaced with the name of the node being monitored. + id = "node 'NODE_NAME' in task '{{ .TaskName }}'" + # The message of the alert. INTERVAL will be replaced by the interval. + message = "{{ .ID }} is {{ if eq .Level \"OK\" }}alive{{ else }}dead{{ end }}: {{ index .Fields \"collected\" | printf \"%0.3f\" }} points/INTERVAL." + + +# Multiple InfluxDB configurations can be defined. +# Exactly one must be marked as the default. +# Each one will be given a name and can be referenced in batch queries and InfluxDBOut nodes. +[[influxdb]] + # Connect to an InfluxDB cluster + # Kapacitor can subscribe, query and write to this cluster. + # Using InfluxDB is not required and can be disabled. + enabled = true + default = true + name = "localhost" + urls = ["http://localhost:8086"] + username = "" + password = "" + timeout = 0 + # Absolute path to pem encoded CA file. + # A CA can be provided without a key/cert pair + # ssl-ca = "/etc/kapacitor/ca.pem" + # Absolutes paths to pem encoded key and cert files. + # ssl-cert = "/etc/kapacitor/cert.pem" + # ssl-key = "/etc/kapacitor/key.pem" + + # Do not verify the TLS/SSL certificate. + # This is insecure. + insecure-skip-verify = false + + # Maximum time to try and connect to InfluxDB during startup + startup-timeout = "5m" + + # Turn off all subscriptions + disable-subscriptions = false + + # Subscription mode is either "cluster" or "server" + subscription-mode = "cluster" + + # Which protocol to use for subscriptions + # one of 'udp', 'http', or 'https'. 
+ subscription-protocol = "http" + + # Subscriptions resync time interval + # Useful if you want to subscribe to new created databases + # without restart Kapacitord + subscriptions-sync-interval = "1m0s" + + # Override the global hostname option for this InfluxDB cluster. + # Useful if the InfluxDB cluster is in a separate network and + # needs special config to connect back to this Kapacitor instance. + # Defaults to `hostname` if empty. + kapacitor-hostname = "" + + # Override the global http port option for this InfluxDB cluster. + # Useful if the InfluxDB cluster is in a separate network and + # needs special config to connect back to this Kapacitor instance. + # Defaults to the port from `[http] bind-address` if 0. + http-port = 0 + + # Host part of a bind address for UDP listeners. + # For example if a UDP listener is using port 1234 + # and `udp-bind = "hostname_or_ip"`, + # then the UDP port will be bound to `hostname_or_ip:1234` + # The default empty value will bind to all addresses. + udp-bind = "" + # Subscriptions use the UDP network protocl. + # The following options of for the created UDP listeners for each subscription. + # Number of packets to buffer when reading packets off the socket. + udp-buffer = 1000 + # The size in bytes of the OS read buffer for the UDP socket. + # A value of 0 indicates use the OS default. + udp-read-buffer = 0 + + [influxdb.subscriptions] + # Set of databases and retention policies to subscribe to. + # If empty will subscribe to all, minus the list in + # influxdb.excluded-subscriptions + # + # Format + # db_name = <list of retention policies> + # + # Example: + # my_database = [ "default", "longterm" ] + [influxdb.excluded-subscriptions] + # Set of databases and retention policies to exclude from the subscriptions. + # If influxdb.subscriptions is empty it will subscribe to all + # except databases listed here. 
+ # + # Format + # db_name = <list of retention policies> + # + # Example: + # my_database = [ "default", "longterm" ] + +[kubernetes] + # Enable/Disable the kubernetes service. + # Needed by the k8sAutoscale TICKscript node. + enabled = false + # There are several ways to connect to the kubernetes API servers: + # + # Via the proxy, start the proxy via the `kubectl proxy` command: + # api-servers = ["http://localhost:8001"] + # + # From within the cluster itself, in which case + # kubernetes secrets and DNS services are used + # to determine the needed configuration. + # in-cluster = true + # + # Direct connection, in which case you need to know + # the URL of the API servers, the authentication token and + # the path to the ca cert bundle. + # These value can be found using the `kubectl config view` command. + # api-servers = ["http://192.168.99.100:8443"] + # token = "..." + # ca-path = "/path/to/kubernetes/ca.crt" + # + # Kubernetes can also serve as a discoverer for scrape targets. + # In that case the type of resources to discoverer must be specified. + # Valid values are: "node", "pod", "service", and "endpoint". + # resource = "pod" + + + +[smtp] + # Configure an SMTP email server + # Will use TLS and authentication if possible + # Only necessary for sending emails from alerts. + enabled = false + host = "localhost" + port = 25 + username = "" + password = "" + # From address for outgoing mail + from = "" + # List of default To addresses. + # to = ["oncall@example.com"] + + # Skip TLS certificate verify when connecting to SMTP server + no-verify = false + # Close idle connections after timeout + idle-timeout = "30s" + + # If true the all alerts will be sent via Email + # without explicitly marking them in the TICKscript. + global = false + # Only applies if global is true. + # Sets all alerts in state-changes-only mode, + # meaning alerts will only be sent if the alert state changes. 
+ state-changes-only = false + +[snmptrap] + # Configure an SNMP trap server + enabled = false + # The host:port address of the SNMP trap server + addr = "localhost:162" + # The community to use for traps + community = "kapacitor" + # Number of retries when sending traps + retries = 1 + + +[opsgenie] + # Configure OpsGenie with your API key and default routing key. + enabled = false + # Your OpsGenie API Key. + api-key = "" + # Default OpsGenie teams, can be overridden per alert. + # teams = ["team1", "team2"] + # Default OpsGenie recipients, can be overridden per alert. + # recipients = ["recipient1", "recipient2"] + # The OpsGenie API URL should not need to be changed. + url = "https://api.opsgenie.com/v1/json/alert" + # The OpsGenie Recovery URL, you can change this + # based on which behavior you want a recovery to + # trigger (Add Notes, Close Alert, etc.) + recovery_url = "https://api.opsgenie.com/v1/json/alert/note" + # If true then all alerts will be sent to OpsGenie + # without explicitly marking them in the TICKscript. + # The team and recipients can still be overridden. + global = false + +[victorops] + # Configure VictorOps with your API key and default routing key. + enabled = false + # Your VictorOps API Key. + api-key = "" + # Default VictorOps routing key, can be overridden per alert. + routing-key = "" + # The VictorOps API URL should not need to be changed. + url = "https://alert.victorops.com/integrations/generic/20131114/alert" + # If true the all alerts will be sent to VictorOps + # without explicitly marking them in the TICKscript. + # The routing key can still be overridden. + global = false + # Use JSON for the "data" field + # New installations will want to set this to true as it makes + # the data that triggered the alert available within VictorOps. + # The default is "false" for backwards compatibility reasons. + # json-data = false + +[pagerduty] + # Configure PagerDuty. + enabled = false + # Your PagerDuty Service Key. 
+ service-key = "" + # The PagerDuty API URL should not need to be changed. + url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" + # If true the all alerts will be sent to PagerDuty + # without explicitly marking them in the TICKscript. + global = false + +[pushover] + # Configure Pushover. + enabled = false + # Your Pushover API token. + token = "" + # Your Pushover USER_TOKEN. + user-key = "" + # The URL for the Pushover API. + url = "https://api.pushover.net/1/messages.json" + +########################################## +# Configure Alert POST request Endpoints + +# As ENV variables: +# KAPACITOR_HTTPPOST_0_ENDPOINT = "example" +# KAPACITOR_HTTPPOST_0_URL = "http://example.com" +# KAPACITOR_HTTPPOST_0_HEADERS_Example = "header" + +# [[httppost]] +# endpoint = "example" +# url = "http://example.com" +# headers = { Example = "your-key" } +# basic-auth = { username = "my-user", password = "my-pass" } +# +# # Provide an alert template for constructing a custom HTTP body. +# # Alert templates are only used with post alert handlers as they consume alert data. +# # The template uses https://golang.org/pkg/text/template/ and has access to the following fields: +# # * .ID - The unique ID for this alert +# # * .Message - The message of the alert +# # * .Details - The details of the alert +# # * .Time - The time the alert event occurred +# # * .Duration - The duration of the alert event. +# # * .Level - The level of the alert, i.e INFO, WARN, or CRITICAL. +# # * .Data - The data that triggered the alert. +# # +# # Specify the template inline. +# alert-template = "{{.Message}}:{{range .Data.Series}}{{.Tags}},{{range .Values}}{{.}}{{end}}{{end}}" +# # Specify an absolute path to a template file. +# alert-template-file = "/path/to/template/file" +# +# # Provide a row template for constructing a custom HTTP body. +# # Row templates are only used with httpPost pipeline nodes as they consume a row at a time. 
+# # The template uses https://golang.org/pkg/text/template/ and has access to the following fields: +# # * .Name - The measurement name of the data stream +# # * .Tags - A map of tags on the data. +# # * .Values - A list of values, each entry is a map containing a "time" key for the time of the point +# # and keys for all other fields on the point. +# # +# # Specify the template inline. +# row-template = "{{.Name}} host={{index .Tags \"host\"}}{{range .Values}} {{index . "time"}} {{index . "value"}}{{end}}" +# # Specify an absolute path to a template file. +# row-template-file = "/path/to/template/file" + +[slack] + # Configure Slack. + enabled = true + # The Slack webhook URL, can be obtained by adding + # an Incoming Webhook integration. + # Visit https://slack.com/services/new/incoming-webhook + # to add new webhook for Kapacitor. + url = "https://hooks.slack.com/services/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" + # Default channel for messages + channel = "#clmc" + # If true all the alerts will be sent to Slack + # without explicitly marking them in the TICKscript. + global = false + # Only applies if global is true. + # Sets all alerts in state-changes-only mode, + # meaning alerts will only be sent if the alert state changes. + state-changes-only = false + +[telegram] + # Configure Telegram. + enabled = false + # The Telegram Bot URL should not need to be changed. + url = "https://api.telegram.org/bot" + # Telegram Bot Token, can be obtained From @BotFather. + token = "" + # Default recipient for messages, Contact @myidbot on Telegram to get an ID. + chat-id = "" + # Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your alert message. + #parse-mode = "Markdown" + # Disable link previews for links in this message + disable-web-page-preview = false + # Sends the message silently. iOS users will not receive a notification, Android users will receive a notification with no sound. 
+ disable-notification = false + # If true the all alerts will be sent to Telegram + # without explicitly marking them in the TICKscript. + global = false + # Only applies if global is true. + # Sets all alerts in state-changes-only mode, + # meaning alerts will only be sent if the alert state changes. + state-changes-only = false + +[hipchat] + # Configure HipChat. + enabled = false + # The HipChat API URL. Replace subdomain with your + # HipChat subdomain. + # url = "https://subdomain.hipchat.com/v2/room" + # Visit https://www.hipchat.com/docs/apiv2 + # for information on obtaining your room id and + # authentication token. + # Default room for messages + room = "" + # Default authentication token + token = "" + # If true then all alerts will be sent to HipChat + # without explicitly marking them in the TICKscript. + global = false + # Only applies if global is true. + # Sets all alerts in state-changes-only mode, + # meaning alerts will only be sent if the alert state changes. + state-changes-only = false + +[alerta] + # Configure Alerta. + enabled = false + # The Alerta URL. + url = "" + # Default authentication token. + token = "" + # Default token prefix + # If you are on older versions of alerta you may need to change this to "Key" + token-prefix = "Bearer" + # Default environment. + environment = "" + # Default origin. + origin = "kapacitor" + +[sensu] + # Configure Sensu. + enabled = false + # The Sensu Client host:port address. + addr = "sensu-client:3030" + # Default JIT source. + source = "Kapacitor" + +[reporting] + # Send usage statistics + # every 12 hours to Enterprise. + enabled = true + url = "https://usage.influxdata.com" + +[stats] + # Emit internal statistics about Kapacitor. + # To consume these stats create a stream task + # that selects data from the configured database + # and retention policy. + # + # Example: + # stream|from().database('_kapacitor').retentionPolicy('autogen')... 
+ # + enabled = true + stats-interval = "10s" + database = "_kapacitor" + retention-policy= "autogen" + +[udf] +# Configuration for UDFs (User Defined Functions) +[udf.functions] + # Example go UDF. + # First compile example: + # go build -o avg_udf ./udf/agent/examples/moving_avg.go + # + # Use in TICKscript like: + # stream.goavg() + # .field('value') + # .size(10) + # .as('m_average') + # + # uncomment to enable + #[udf.functions.goavg] + # prog = "./avg_udf" + # args = [] + # timeout = "10s" + + # Example python UDF. + # Use in TICKscript like: + # stream.pyavg() + # .field('value') + # .size(10) + # .as('m_average') + # + # uncomment to enable + #[udf.functions.pyavg] + # prog = "/usr/bin/python2" + # args = ["-u", "./udf/agent/examples/moving_avg.py"] + # timeout = "10s" + # [udf.functions.pyavg.env] + # PYTHONPATH = "./udf/agent/py" + + # Example UDF over a socket + #[udf.functions.myCustomUDF] + # socket = "/path/to/socket" + # timeout = "10s" + +[talk] + # Configure Talk. + enabled = false + # The Talk webhook URL. + url = "https://jianliao.com/v2/services/webhook/uuid" + # The default authorName. + author_name = "Kapacitor" + +# MQTT client configuration. +# Mutliple different clients may be configured by +# repeating [[mqtt]] sections. +[[mqtt]] + enabled = false + # Unique name for this broker configuration + name = "localhost" + # Whether this broker configuration is the default + default = true + # URL of the MQTT broker. + # Possible protocols include: + # tcp - Raw TCP network connection + # ssl - TLS protected TCP network connection + # ws - Websocket network connection + url = "tcp://localhost:1883" + + # TLS/SSL configuration + # A CA can be provided without a key/cert pair + # ssl-ca = "/etc/kapacitor/ca.pem" + # Absolutes paths to pem encoded key and cert files. + # ssl-cert = "/etc/kapacitor/cert.pem" + # ssl-key = "/etc/kapacitor/key.pem" + + # Unique ID for this MQTT client. 
+ # If empty used the value of "name" + client-id = "" + + # Username + username = "" + # Password + password = "" + +[[swarm]] + # Enable/Disable the Docker Swarm service. + # Needed by the swarmAutoscale TICKscript node. + enabled = false + # Unique ID for this Swarm cluster + # NOTE: This is not the ID generated by Swarm rather a user defined + # ID for this cluster since Kapacitor can communicate with multiple clusters. + id = "" + # List of URLs for Docker Swarm servers. + servers = ["http://localhost:2376"] + # TLS/SSL Configuration for connecting to secured Docker daemons + ssl-ca = "" + ssl-cert = "" + ssl-key = "" + insecure-skip-verify = false + +################################## +# Input Methods, same as InfluxDB +# + +[collectd] + enabled = false + bind-address = ":25826" + database = "collectd" + retention-policy = "" + batch-size = 1000 + batch-pending = 5 + batch-timeout = "10s" + typesdb = "/usr/share/collectd/types.db" + +[opentsdb] + enabled = false + bind-address = ":4242" + database = "opentsdb" + retention-policy = "" + consistency-level = "one" + tls-enabled = false + certificate = "/etc/ssl/influxdb.pem" + batch-size = 1000 + batch-pending = 5 + batch-timeout = "1s" + +# Service Discovery and metric scraping + +[[scraper]] + enabled = false + name = "myscraper" + # Specify the id of a discoverer service specified below + discoverer-id = "" + # Specify the type of discoverer service being used. 
+ discoverer-service = "" + db = "prometheus_raw" + rp = "autogen" + type = "prometheus" + scheme = "http" + metrics-path = "/metrics" + scrape-interval = "1m0s" + scrape-timeout = "10s" + username = "" + password = "" + bearer-token = "" + ssl-ca = "" + ssl-cert = "" + ssl-key = "" + ssl-server-name = "" + insecure-skip-verify = false + +# Supported discovery services + +[[azure]] + enabled = false + id = "myazure" + port = 80 + subscription-id = "" + tenant-id = "" + client-id = "" + client-secret = "" + refresh-interval = "5m0s" + +[[consul]] + enabled = false + id = "myconsul" + address = "127.0.0.1:8500" + token = "" + datacenter = "" + tag-separator = "," + scheme = "http" + username = "" + password = "" + ssl-ca = "" + ssl-cert = "" + ssl-key = "" + ssl-server-name = "" + insecure-skip-verify = false + +[[dns]] + enabled = false + id = "mydns" + refresh-interval = "30s" + ## Type can be SRV, A, or AAAA + type = "SRV" + ## Port is the port to scrape for records returned by A or AAAA types + port = 80 + +[[ec2]] + enabled = false + id = "myec2" + region = "us-east-1" + access-key = "" + secret-key = "" + profile = "" + refresh-interval = "1m0s" + port = 80 + +[[file-discovery]] + enabled = false + id = "myfile" + refresh-interval = "5m0s" + files = [] + +[[gce]] + enabled = false + id = "mygce" + project = "" + zone = "" + filter = "" + refresh-interval = "1m0s" + port = 80 + tag-separator = "," + +[[marathon]] + enabled = false + id = "mymarathon" + timeout = "30s" + refresh-interval = "30s" + bearer-token = "" + ssl-ca = "" + ssl-cert = "" + ssl-key = "" + ssl-server-name = "" + insecure-skip-verify = false + +[[nerve]] + enabled = false + id = "mynerve" + timeout = "10s" + +[[serverset]] + enabled = false + id = "myserverset" + timeout = "10s" + +[[static-discovery]] + enabled = false + id = "mystatic" + targets = ["localhost:9100"] + [static.labels] + region = "us-east-1" + +[[triton]] + enabled = false + id = "mytriton" + account = "" + dns-suffix = "" + 
endpoint = "" + port = 9163 + refresh-interval = "1m0s" + version = 1 + ssl-ca = "" + ssl-cert = "" + ssl-key = "" + ssl-server-name = "" + insecure-skip-verify = false diff --git a/clmctest/streaming/kapacitor.json b/clmctest/streaming/kapacitor.json new file mode 100644 index 0000000..6011886 --- /dev/null +++ b/clmctest/streaming/kapacitor.json @@ -0,0 +1,6 @@ +{ + "id": "1", + "name": "CLMCKapacitor", + "url": "http://localhost:9092", + "active": false +} \ No newline at end of file diff --git a/clmctest/streaming/manual.md b/clmctest/streaming/manual.md new file mode 100644 index 0000000..7db0fc7 --- /dev/null +++ b/clmctest/streaming/manual.md @@ -0,0 +1,146 @@ +<!-- +// © University of Southampton IT Innovation Centre, 2017 +// +// Copyright in this software belongs to University of Southampton +// IT Innovation Centre of Gamma House, Enterprise Road, +// Chilworth Science Park, Southampton, SO16 7NS, UK. +// +// This software may not be used, sold, licensed, transferred, copied +// or reproduced in whole or in part in any manner or form or in or +// on any media by any person other than in accordance with the terms +// of the Licence Agreement supplied with the software, or otherwise +// without the prior written consent of the copyright owners. +// +// This software is distributed WITHOUT ANY WARRANTY, without even the +// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE, except where stated in the Licence Agreement supplied with +// the software. +// +// Created By : Michael Boniface +// Updated By : Simon Crowle +// Created Date : 18-12-2017 +// Update Date : 14-02-2018 +// Created for Project : FLAME +--> + +# CLMC Adaptive Streaming Test + +This test streams mpeg-dash video using the two nginx servers monitored by Telegraf configured with a default apache plugin and a net_response plugin. 
The data is stored in the `clmc-service` using database `CLMCMetrics` and measurements `nginx` and `net_response` + +The following command brings up the services + +`vagrant --fixture=streaming up` + +* clmc-service: configured with influx, kapacitor, chronograf +* nginx1@DC1, nginx2@DC2: configured with nginx and a test video located at http://192.168.50.11:80/test_video/stream.mpd on the internal vbox network and at http://localhost:8081/test_video/stream.mpd if accessing from the host machine + +### Run the test set-up + +`vagrant --fixture=streaming ssh clmc-service -- "sudo /vagrant/test/streaming/setupCLMC.sh /vagrant/test/streaming"` +`vagrant --fixture=streaming ssh nginx1 -- "sudo /vagrant/test/streaming/setupNGINX.sh"` + +### Run the automated test + +Run the load test using the following command (here, the last parameter '15' refers to the number of VLC player clients to be launched): + +`vagrant --fixture=streaming ssh loadtest-streaming -- "sudo /vagrant/test/streaming/run.sh /home/ubuntu/test/streaming http://192.168.50.11/test_video/stream.mpd 15"` + +This test currently just generates the load and does not have any assertions. It breaks at 1000. + +And then point your browser to the Chronograf dashboard: + +`http://localhost:8888` + +### Run the automated PyTests + +SSH into the clmc-service VM: + +`vagrant --fixture=streaming ssh clmc-service` + +Run the automated tests written in pytest: + +`pytest -s /vagrant/test/streaming/` + +### Manual test + +## Manual set-up of Chronograf's CLMC data source + +If you __do not__ want to run the automatic set-up, basic entry to the Chronograf dashboard is as follows: + +1. Point your browser to: [http://localhost:8888](http://localhost:8888) +2. Enter your connection string: `http://localhost:8086` +3. Enter the Name: `Influx 1` +4.
Enter the Telegraf database: `CLMCMetrics` + +## Manual test on Windows + +### View the video +Install VLC video client on the host machine, you must use a very recent version otherwise the MPD file cannot be read. At the time of writing the following nightly build was installed: + +https://nightlies.videolan.org/build/win32/vlc-3.0.0-rc1-20171201-0326/vlc-3.0.0-20171201-0326-rc1-win32.exe + +Start the VLC Player + +`Media->Open Network Stream` + +The test video is the FLAME project video and it can be viewed at the following location. + +`Enter the network URL: http://localhost:8081/test_video/stream.mpd for nginx1 server` + +The video should play. + +### Query the data + +Open Chronograf by entering the following URL into a browser on the host http://localhost:8888. Your CLMC data source, Kapacitor and demonstration dashboard should be ready for you to explore. + +Press the Data Explorer in the menu and select the nginx measurement and create a query such as + +`SELECT mean("requests") AS "mean_requests" FROM "CLMCMetrics"."autogen"."nginx" WHERE time > now() - 1h GROUP BY time(10s)` + +## KPI triggers + +In this demonstrator an example KPI rule has been set up in Kapacitor which fires when the average number of active connections per 5 seconds on the Nginx 1 or Nginx 2 server goes above certain thresholds ( a 'warning' at 10 connections/5 seconds ).
The TICKscript specification for this rule is as follows: + +``` +dbrp "CLMCMetrics"."autogen" + +// Nginx 1 rule +// ------------- +var n1Data = batch + |query(''' SELECT mean("active") AS "mean_active" FROM "CLMCMetrics"."autogen"."nginx" WHERE "ipendpoint"='adaptive_streaming_I1_nginx1' ''') + .period(5s) + .every(5s) + +var n1Alert = n1Data + |alert() + .id('{{ .Name }}/adaptive_streaming_I1_nginx1') + .message('{{ .ID }} is {{ .Level }} Mean active connections: {{ index .Fields "mean_active" }}') + .warn(lambda: "mean_active" > 10) + .slack() + .log( '/tmp/RPSLoad.log' ) + +// Nginx 2 rule +// ------------- +var n2Data = batch + |query(''' SELECT mean("active") AS "mean_active" FROM "CLMCMetrics"."autogen"."nginx" WHERE "ipendpoint"='adaptive_streaming_I1_nginx2' ''') + .period(5s) + .every(5s) + +var n2Alert = n2Data + |alert() + .id('{{ .Name }}/adaptive_streaming_I1_nginx2') + .message('{{ .ID }} is {{ .Level }} Mean active connections: {{ index .Fields "mean_active" }}') + .warn(lambda: "mean_active" > 10) + .slack() + .log( '/tmp/RPSLoad.log' ) +``` + +Alerts are sent to both an internal logging within the CLMC service file system and also to a FLAME demo Slack service: + +https://flamedemo-itinnov.slack.com + +Alerts can be found under the '#clmc' channel. + +### Kapacitor rules in Chronograf's GUI + +Additional rules can be added to this demonstrator either via the Chronograf GUI (see [here](https://docs.influxdata.com/chronograf/v1.4/introduction/getting-started/#4-connect-chronograf-to-kapacitor) for more information) or by using the Kapacitor HTTP API and TICKscript (for an introduction, [look here](https://docs.influxdata.com/kapacitor/v1.4/tick/)). diff --git a/clmctest/streaming/report.sh b/clmctest/streaming/report.sh new file mode 100644 index 0000000..ad1251a --- /dev/null +++ b/clmctest/streaming/report.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +# This script reads stdin and expects the output of cvlc.
+# It is used by the run.sh script and receives the output of the cvlc client. +# It counts the number of times the frame "dropping" error is seen and every 10 times it sends a message to telegraf reporting "another 10" errors. + +if [ "$#" -ne 1 ]; then + echo "Error: illegal number of arguments: "$# + echo "Usage: report.sh <client number>" + exit +fi + +COUNTER=$1 +TELEGRAF=http://localhost:8186 + +ERR_COUNT=0 +while read line; do + if [[ $line = *"dropping"* ]]; then + ERR_COUNT=$(($ERR_COUNT + 1)) + fi + TEN=$((ERR_COUNT % 10)) + if [ $TEN -eq 0 ]; then + curl -i -XPOST "${TELEGRAF}/write?precision=s" --data-binary "vlc,client=${COUNTER} drop_error=10 $(date +%s)" >& /dev/null + fi +done \ No newline at end of file diff --git a/clmctest/streaming/rspec.yml b/clmctest/streaming/rspec.yml new file mode 100644 index 0000000..b1291a3 --- /dev/null +++ b/clmctest/streaming/rspec.yml @@ -0,0 +1,64 @@ +hosts: + - name: clmc-service + cpus: 1 + memory: 2048 + disk: "10GB" + forward_ports: + - guest: 8086 + host: 8086 + - guest: 8888 + host: 8888 + - guest: 9092 + host: 9092 + ip_address: "192.168.50.10" + - name: nginx1 + cpus: 1 + memory: 2048 + disk: "10GB" + service_name: "nginx" + forward_ports: + - guest: 80 + host: 8081 + ip_address: "192.168.50.11" + location: "DC1" + sfc_id: "MS_Template_1" + sfc_id_instance: "MS_I1" + sf_id: "adaptive_streaming" + sf_id_instance: "adaptive_streaming_I1" + ipendpoint_id: "adaptive_streaming_I1_nginx1" + influxdb_url: "http://192.168.50.10:8086" + database_name: "CLMCMetrics" + - name: nginx2 + cpus: 1 + memory: 2048 + disk: "10GB" + service_name: "nginx" + forward_ports: + - guest: 80 + host: 8082 + ip_address: "192.168.50.12" + location: "DC2" + sfc_id: "MS_Template_1" + sfc_id_instance: "MS_I1" + sf_id: "adaptive_streaming" + sf_id_instance: "adaptive_streaming_I1" + ipendpoint_id: "adaptive_streaming_I1_nginx2" + influxdb_url: "http://192.168.50.10:8086" + database_name: "CLMCMetrics" + - name: loadtest-streaming + cpus: 2 
+ memory: 4096 + disk: "10GB" + service_name: "loadtest-streaming" + forward_ports: + - guest: 80 + host: 8083 + ip_address: "192.168.50.13" + location: "DC1" + sfc_id: "MS_Template_1" + sfc_id_instance: "MS_I1" + sf_id: "adaptive_streaming_client" + sf_id_instance: "adaptive_streaming_I1" + ipendpoint_id: "adaptive_streaming_I1_client1" + influxdb_url: "http://192.168.50.10:8086" + database_name: "CLMCMetrics" diff --git a/clmctest/streaming/rules.json b/clmctest/streaming/rules.json new file mode 100644 index 0000000..faad48b --- /dev/null +++ b/clmctest/streaming/rules.json @@ -0,0 +1,9 @@ +{ + "id" : "Request_Rate_Alert_NGINXServers", + "type" : "batch", + "dbrps" : [{"db": "CLMCMetrics", "rp" : "autogen"}], + + "script" : "\/\/ NGINX 1 Rule\r\n\/\/ -------------\r\nvar n1Data = batch\r\n |query(''' SELECT mean(\"active\") AS \"mean_active\" FROM \"CLMCMetrics\".\"autogen\".\"nginx\" WHERE \"ipendpoint\"='adaptive_streaming_I1_nginx1' ''')\r\n .period(5s)\r\n .every(5s)\r\n\r\nvar n1Alert = n1Data\r\n |alert()\r\n .id('{{ .Name }}\/adaptive_streaming_I1_nginx1')\r\n .message('{{ .ID }} is {{ .Level }} Mean active connections: {{ index .Fields \"mean_active\" }}')\r\n .warn(lambda: \"mean_active\" > 10)\r\n .slack()\r\n .log( '\/tmp\/RPSLoad.log' )\r\n\r\n\/\/ NGINX 2 Rule\r\n\/\/ -------------\r\nvar n2Data = batch\r\n |query(''' SELECT mean(\"active\") AS \"mean_active\" FROM \"CLMCMetrics\".\"autogen\".\"nginx\" WHERE \"ipendpoint\"='adaptive_streaming_I1_nginx2' ''')\r\n .period(5s)\r\n .every(5s)\r\n\r\nvar n2Alert = n2Data\r\n |alert()\r\n .id('{{ .Name }}\/adaptive_streaming_I1_nginx2')\r\n .message('{{ .ID }} is {{ .Level }} Mean active connections: {{ index .Fields \"mean_active\" }}')\r\n .warn(lambda: \"mean_active\" > 10)\r\n .slack()\r\n .log( '\/tmp\/RPSLoad.log' )", + + "status" : "enabled" +} \ No newline at end of file diff --git a/clmctest/streaming/run.sh b/clmctest/streaming/run.sh new file mode 100644 index 0000000..81c7d5f --- /dev/null +++ 
b/clmctest/streaming/run.sh @@ -0,0 +1,57 @@ +#!/bin/bash +#///////////////////////////////////////////////////////////////////////// +#// +#// (c) University of Southampton IT Innovation Centre, 2017 +#// +#// Copyright in this software belongs to University of Southampton +#// IT Innovation Centre of Gamma House, Enterprise Road, +#// Chilworth Science Park, Southampton, SO16 7NS, UK. +#// +#// This software may not be used, sold, licensed, transferred, copied +#// or reproduced in whole or in part in any manner or form or in or +#// on any media by any person other than in accordance with the terms +#// of the Licence Agreement supplied with the software, or otherwise +#// without the prior written consent of the copyright owners. +#// +#// This software is distributed WITHOUT ANY WARRANTY, without even the +#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +#// PURPOSE, except where stated in the Licence Agreement supplied with +#// the software. +#// +#// Created By : Michael Boniface +#// Created Date : 15/02/2017 +#// Created for Project : FLAME +#// +#///////////////////////////////////////////////////////////////////////// + +if [ "$#" -ne 3 ]; then + echo "Error: illegal number of arguments: "$# + echo "Usage: run.sh TEST_RUN_DIR STREAM_URI MAX_CLIENTS" + exit +fi + +# create test directories +TEST_FOLDER=$(date +%Y%m%d%H%M%S) +TEST_RUN_DIR=$1 +TEST_DIR=$TEST_RUN_DIR"/streaming/"$TEST_FOLDER +echo "Test directory: "$TEST_DIR +mkdir -p "$TEST_DIR" + +# run testplan +cd $TEST_DIR + +#jmeter -n -LDEBUG -t /vagrant/test/streaming/testplan.jmx -l results.jtx -j jmeter.log + +# quick bash equivalent in case Jmeter fails +STREAM_URI=$2 +COUNTER=0 +MAX_CLIENTS=$3 +while [ $COUNTER -lt $MAX_CLIENTS ]; do + # run cvlc headless, redirect stderr into stdout, pipe that into the report.sh script + cvlc -Vdummy --no-audio $STREAM_URI 2>&1 | /vagrant/test/streaming/report.sh ${COUNTER} & + sleep 1 + let COUNTER=COUNTER+1 +done + + + diff --git 
a/clmctest/streaming/setupCLMC.sh b/clmctest/streaming/setupCLMC.sh new file mode 100644 index 0000000..6d2bd38 --- /dev/null +++ b/clmctest/streaming/setupCLMC.sh @@ -0,0 +1,50 @@ +#!/bin/bash +#///////////////////////////////////////////////////////////////////////// +#// +#// (c) University of Southampton IT Innovation Centre, 2018 +#// +#// Copyright in this software belongs to University of Southampton +#// IT Innovation Centre of Gamma House, Enterprise Road, +#// Chilworth Science Park, Southampton, SO16 7NS, UK. +#// +#// This software may not be used, sold, licensed, transferred, copied +#// or reproduced in whole or in part in any manner or form or in or +#// on any media by any person other than in accordance with the terms +#// of the Licence Agreement supplied with the software, or otherwise +#// without the prior written consent of the copyright owners. +#// +#// This software is distributed WITHOUT ANY WARRANTY, without even the +#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +#// PURPOSE, except where stated in the Licence Agreement supplied with +#// the software. 
+#// +#// Created By : Simon Crowle +#// Created Date : 14/02/2018 +#// Created for Project : FLAME +#// +#///////////////////////////////////////////////////////////////////////// + +TEST_DIR=$1 + +# copy Kapacitor conf to /etc/kapacitor and restart + +systemctl stop kapacitor +echo $TEST_DIR"/kapacitor.conf" +cp $TEST_DIR/kapacitor.conf /etc/kapacitor/kapacitor.conf +systemctl start kapacitor + +# wait for kapacitor to restart +# TODO: do this better +sleep 5 + +# Set up Influx data source +curl -i -X POST -H "Content-Type: application/json" http://localhost:8888/chronograf/v1/sources -d @$TEST_DIR/influx.json + +# Set up Kapacitor +curl -i -X POST -H "Content-Type: application/json" http://localhost:8888/chronograf/v1/sources/1/kapacitors -d @$TEST_DIR/kapacitor.json + +# Set up rules +curl -i -X POST -H "Content-Type: application/json" http://localhost:9092/kapacitor/v1/tasks -d @$TEST_DIR/rules.json + +# Set up dashboard +curl -i -X POST -H "Content-Type: application/json" http://localhost:8888/chronograf/v1/dashboards -d @$TEST_DIR/dashboard.json diff --git a/clmctest/streaming/setupNGINX.sh b/clmctest/streaming/setupNGINX.sh new file mode 100644 index 0000000..3833350 --- /dev/null +++ b/clmctest/streaming/setupNGINX.sh @@ -0,0 +1,41 @@ +#!/bin/bash +#///////////////////////////////////////////////////////////////////////// +#// +#// (c) University of Southampton IT Innovation Centre, 2018 +#// +#// Copyright in this software belongs to University of Southampton +#// IT Innovation Centre of Gamma House, Enterprise Road, +#// Chilworth Science Park, Southampton, SO16 7NS, UK. +#// +#// This software may not be used, sold, licensed, transferred, copied +#// or reproduced in whole or in part in any manner or form or in or +#// on any media by any person other than in accordance with the terms +#// of the Licence Agreement supplied with the software, or otherwise +#// without the prior written consent of the copyright owners. 
+#// +#// This software is distributed WITHOUT ANY WARRANTY, without even the +#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +#// PURPOSE, except where stated in the Licence Agreement supplied with +#// the software. +#// +#// Created By : Simon Crowle +#// Created Date : 14/02/2018 +#// Created for Project : FLAME +#// +#///////////////////////////////////////////////////////////////////////// + +# NGINX +DEST_DIR="/usr/share/nginx/html" + +TEST_VIDEO="20180212104221flame-project-full.mp4" +TEST_VIDEO_ARCHIVE=$TEST_VIDEO".gz" +DEST_FILE=$DEST_DIR"/"$TEST_VIDEO_ARCHIVE + +echo "ftp://ftp.it-innovation.soton.ac.uk/testdata/video/"$TEST_VIDEO_ARCHIVE + +# Copy files for MPEG-DASH testing +curl "ftp://ftp.it-innovation.soton.ac.uk/testdata/video/"$TEST_VIDEO_ARCHIVE --user flame-rw:DR8ngj3ogSjd8gl -o $DEST_FILE +tar -xvf $DEST_FILE -C $DEST_DIR + +rm -rf $DEST_FILE +mv $DEST_DIR"/"$TEST_VIDEO $DEST_DIR"/"test_video diff --git a/clmctest/streaming/stop.sh b/clmctest/streaming/stop.sh new file mode 100644 index 0000000..b332fe3 --- /dev/null +++ b/clmctest/streaming/stop.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +for pid in $(ps -ef | grep "/usr/bin/vlc" | awk '{print $2}'); do kill -9 $pid; done +# TODO: 'killall vlc' should work: need to test though \ No newline at end of file diff --git a/clmctest/streaming/test_rspec.py b/clmctest/streaming/test_rspec.py new file mode 100644 index 0000000..0bbea54 --- /dev/null +++ b/clmctest/streaming/test_rspec.py @@ -0,0 +1,41 @@ +#!/usr/bin/python3 + +from subprocess import run +from platform import system +import pytest + + +@pytest.mark.parametrize("service_name", [ + 'clmc-service', + 'nginx1', + 'nginx2', + 'loadtest-streaming' +]) +def test_service_names(streaming_config, service_name): + """ + Tests the service names in the configuration. 
+ + :param streaming_config: the configuration fixture collected from conftest.py + :param service_name the service name to test + """ + + assert any(s['name'] == service_name for s in streaming_config['hosts']), "{0} not in list of hosts".format(service_name) + print("\nSuccessfully passed configuration test for service name {0}\n".format(service_name)) + + +def test_ping(streaming_config): + """ + Pings each service to test for liveliness + + :param streaming_config: the configuration fixture collected from conftest.py + """ + + print("\n") # blank line printed for formatting purposes + + ping_count = 1 + system_dependent_param = "-n" if system().lower() == "windows" else "-c" + + for service in streaming_config['hosts']: + command = ["ping", system_dependent_param, str(ping_count), service['ip_address']] + assert run(command).returncode == 0, "Service ping test failed for {0} with ip address {1}".format(service['name'], service['ip_address']) + print("\nSuccessfully passed ping test for service: {0}\n".format(service['name'])) diff --git a/clmctest/streaming/test_streaming.py b/clmctest/streaming/test_streaming.py new file mode 100644 index 0000000..c1b7f77 --- /dev/null +++ b/clmctest/streaming/test_streaming.py @@ -0,0 +1,177 @@ +#!/usr/bin/python3 + +from threading import Thread +from time import sleep +from queue import Queue +from xml.etree import ElementTree +from urllib.parse import urljoin +from os.path import isfile, dirname, join +from os import remove, system +import pytest +import requests +import json + + +class TestStreamingAlerts(object): + """ + A testing class used to group all the tests related to the streaming scenario. 
+ """ + + kapacitor_url = "http://localhost:9092/kapacitor/v1/tasks" + + @pytest.mark.parametrize("rule, log", [ + ("rules.json", "/tmp/RPSLoad.log"), + ]) + def test_alerts(self, rule, log, streaming_url, streaming_manifest): + """ + This test case generates some streaming requests to the server to ensure an alert is triggered and then tests the log file for this alert. Different logs can be tested by + appending to the list of parameters in the pytest decorator. + + Format for pytest parameters under test: + ([filename], [log]) + where [filename] is the name of the json file for the rule under test (must be in the same folder as this test is) + [log] is the absolute path of the log file that must be created due to an alert + + :param rule: the name of the rule json file + :param log: the path of the log file that is under test + :param streaming_url: the fixture providing the streaming url for this test case + :param streaming_manifest: the fixture providing the root of the XML streaming manifest + """ + + kapacitor_setter = self.kapacitor_setting(rule) + next(kapacitor_setter) # Setup the test rule + + try: + if isfile(log): + remove(log) # delete log file if existing from previous tests + except PermissionError: + system("sudo rm {0}".format(log)) # handles the case for running on linux where permission will be required to delete the old log file + + segments = streaming_manifest.findall(".//{urn:mpeg:DASH:schema:MPD:2011}SegmentURL") + + threads_num = 30 + threads_queue = Queue(maxsize=threads_num) # a synchronized queue is used to track if all the threads has finished execution + threads = [StreamingThread(streaming_url, segments, threads_queue) for _ in range(threads_num)] + for t in threads: + t.start() + + alert_created = False + while True: + # loop while threads are execution and do a check every 2.5 seconds to check if either alert log has been created or threads have finished execution + sleep(2.5) + if isfile(log): + for t in threads: # kill all 
running threads in case log file is created beforehand + t.stop() + alert_created = True + + if threads_queue.full(): + break + + assert alert_created, "Alerts test failed: no log file is created indicating a triggered alert." + + print("\nSuccessfully passed alert creation test.\n") + + next(kapacitor_setter) # Teardown the test rule + + def kapacitor_setting(self, rule): + """ + A generator function used to provide setUp/tearDown actions for a particular kapacitor rule. + On setUp rule is initialized, on tearDown rule is deleted. Interleaving is achieved using the generator pattern. + + :param rule: the name of the json file for the rule under test + """ + + # Initialization of the kapacitor rule - Test setUp (UnitTest style) + with open(join(dirname(__file__), rule), "r") as rule_file: + data = "".join(line.strip() for line in rule_file.readlines()) + + rule_data = json.loads(data) + requests.delete(url=urljoin(self.kapacitor_url + "/", rule_data.get("id"))) # delete in case of a task with the same ID already set in the kapacitor + requests.post(url=self.kapacitor_url, data=data, headers={"Content-Type": "application/json"}) + + yield + + # Deleting the kapacitor rule used for testing - Test tearDown (UnitTest style) + requests.delete(url=urljoin(self.kapacitor_url + "/", rule_data.get("id"))) + yield + + @staticmethod + @pytest.fixture(scope="class", params=[{"server": "http://192.168.50.11", "video": "/test_video/stream.mpd"}]) + def streaming_url(request): + """ + A fixture with class scope - used only in the scope of the testing class. 
+ + :param request: the parameters for this fixture - server url and video relative url + :return: the combined URL for the video used for streaming + """ + + return urljoin(request.param["server"], request.param["video"]) + + @staticmethod + @pytest.fixture(scope="class") + def streaming_manifest(streaming_url): + """ + A fixture to download the manifest file for the streamed video and parse the downloaded XML content + + :param streaming_url: the fixture which provides the streaming url + :return: an XML root node object + """ + + manifest_xml = requests.get(streaming_url).text + root = ElementTree.fromstring(manifest_xml) + return root + + +class StreamingThread(Thread): + + def __init__(self, url, segments, queue): + """ + Subclassing the Thread class to create a custom streaming thread. + + :param url: the streaming url + :param segments: the list of SegmentURL XML nodes + :param queue: an auxiliary parameter used to indicate when this thread has finished execution + """ + + super(StreamingThread, self).__init__() + self.running = False + self.url = url + self.segments = segments + self.queue = queue + self._test_finished = False # a flag to indicate whether the thread should stop running + + def stop(self): + """ + Kill this thread and suspend its execution. + """ + + self._test_finished = True + + def run(self): + """ + A function, which simulates an actual streaming by downloading different audio/video segments from the server using a request session, + which leaves the connection open until executing. 
+ """ + + size = len(self.segments) + size = size if size % 2 == 0 else size - 1 + + s = requests.session() + + for i in range(0, int(size / 2), 1): + segment_audio = self.segments[0] + segment_video = self.segments[int(size / 2) + i] + segment_audio_url = segment_audio.attrib.get('media') + segment_video_url = segment_video.attrib.get('media') + + s.get(urljoin(self.url, segment_audio_url)) + s.get(urljoin(self.url, segment_video_url)) + + # check if thread is killed in case the test has already succeeded + if self._test_finished: + break + + # a small time out to mimic the behaviour of a real streaming + sleep(2.5) + + self.queue.put(True) diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..5165fa5 --- /dev/null +++ b/setup.py @@ -0,0 +1,21 @@ +from setuptools import setup, find_packages + +setup( + name = "clmctest", + version = "0.0.1", + author = "Michael Boniface", + author_email = "mjb@it-innovation.soton.ac.uk", + description = "FLAME CLMC Testing Module", + license = "license", + keywords = "example documentation", + url = "http://packages.python.org/an_example_pypi_project", + packages=find_packages(exclude=["services"]), + include_package_data=True, + package_data={'': ['*.yml', '*.sh']}, + long_description="long description", + classifiers=[ + "Development Status :: Alpha", + "Topic :: FLAME Tests", + "License :: ", + ], +) \ No newline at end of file -- GitLab