From 87a0b50b2380ca312a38321e2dd44f888a34a810 Mon Sep 17 00:00:00 2001 From: MJB <mjb@it-innovation.soton.ac.uk> Date: Wed, 30 May 2018 23:51:54 +0100 Subject: [PATCH] clmc webservice deployment and test refactoring --- .gitignore | 1 + .gitlab-ci.yml | 10 +- Vagrantfile | 12 +- clmctest/streaming/__init__.py | 1 - clmctest/streaming/conftest.py | 58 -- clmctest/streaming/dashboard.json | 1 - clmctest/streaming/influx.json | 7 - clmctest/streaming/kapacitor.conf | 720 ------------------ clmctest/streaming/kapacitor.json | 6 - clmctest/streaming/manual.md | 146 ---- clmctest/streaming/report.sh | 45 -- clmctest/streaming/rspec.yml | 88 --- clmctest/streaming/rules.json | 9 - clmctest/streaming/run.sh | 57 -- clmctest/streaming/setupCLMC.sh | 40 - clmctest/streaming/setupNGINX.sh | 41 - clmctest/streaming/stop.sh | 26 - clmctest/streaming/test_rspec.py | 63 -- clmctest/streaming/test_rule1.json | 9 - clmctest/streaming/test_rule2.json | 9 - clmctest/streaming/test_streaming.py | 222 ------ docs/Measuring-E2E-MS-Performance.md | 4 +- docs/aggregation.md | 4 +- docs/clmc-service.md | 8 +- scripts/clmc-service/install.sh | 5 +- src/clmcagent/__init__.py | 1 - src/clmcagent/config_collector.py | 225 ------ src/clmcagent/stop_systemctl_monitor.sh | 3 - src/clmcagent/systemctl_monitor.py | 86 --- src/{clmcwebservice => service}/.coveragerc | 0 src/{clmcwebservice => service}/MANIFEST.in | 0 .../clmcservice/__init__.py | 0 .../clmcservice/aggregator.py | 2 +- .../clmcservice/tests.py | 28 +- .../clmcservice/utilities.py | 0 .../clmcservice/views.py | 0 .../development.ini | 2 +- .../production.ini | 0 src/{clmcwebservice => service}/pytest.ini | 0 src/{clmcwebservice => service}/setup.py | 0 src/{clmcwebservice => service}/tox.ini | 0 MANIFEST.in => src/test/MANIFEST.in | 0 {clmctest => src/test/clmctest}/__init__.py | 0 .../clmctest}/e2e_response_time/rspec.yml | 0 .../test/clmctest}/inputs/__init__.py | 0 .../test/clmctest}/inputs/conftest.py | 0 .../test/clmctest}/inputs/rspec.yml | 0 .../clmctest}/inputs/test_config_collector.py | 0 .../test/clmctest}/inputs/test_rspec.py | 0 .../clmctest}/inputs/test_systemctl_mon.py | 0 .../clmctest}/inputs/test_telegraf_agents.py | 0 .../test/clmctest}/monitoring/E2ESim.py | 0 .../monitoring/E2ETestAggregatorThread.py | 2 +- .../monitoring/LineProtocolGenerator.py | 0 .../test/clmctest}/monitoring/StreamingSim.py | 0 .../test/clmctest}/monitoring/__init__.py | 0 .../test/clmctest}/monitoring/conftest.py | 0 .../test/clmctest}/monitoring/rspec.yml | 0 .../clmctest}/monitoring/test_e2eresults.py | 8 +- .../test/clmctest}/monitoring/test_rspec.py | 0 .../clmctest}/monitoring/test_simresults.py | 0 .../test/clmctest}/scripts/__init__.py | 0 .../test/clmctest}/scripts/rspec.yml | 0 .../clmctest}/scripts/test_config_telegraf.py | 6 +- .../test/clmctest}/services/apache/install.sh | 0 .../services/apache/telegraf_apache.conf | 0 .../test/clmctest}/services/ffmpeg/install.sh | 0 .../services/ffmpeg/telegraf_ffmpeg.conf | 0 .../clmctest}/services/ffmpeg/transcode.sh | 0 .../test/clmctest}/services/host/install.sh | 0 .../services/host/telegraf_host.conf | 0 .../clmctest}/services/ipendpoint/install.sh | 0 .../ipendpoint/telegraf_ipendpoint.conf | 0 .../services/loadtest-streaming/install.sh | 0 .../telegraf_loadtest_streaming.conf | 0 .../test/clmctest}/services/minio/install.sh | 0 .../test/clmctest}/services/minio/minio.conf | 0 .../services/minio/telegraf_minio.conf | 0 .../test/clmctest}/services/mongo/install.sh | 0 .../services/mongo/telegraf_mongo.conf | 0 
.../test/clmctest}/services/nginx/install.sh | 0 .../test/clmctest}/services/nginx/nginx.conf | 0 .../services/nginx/telegraf_nginx.conf | 0 .../test/clmctest}/services/pytest/install.sh | 0 .../test/clmctest}/services/vlc/install.sh | 0 setup.py => src/test/setup.py | 2 +- 86 files changed, 53 insertions(+), 1904 deletions(-) delete mode 100644 clmctest/streaming/__init__.py delete mode 100644 clmctest/streaming/conftest.py delete mode 100644 clmctest/streaming/dashboard.json delete mode 100644 clmctest/streaming/influx.json delete mode 100644 clmctest/streaming/kapacitor.conf delete mode 100644 clmctest/streaming/kapacitor.json delete mode 100644 clmctest/streaming/manual.md delete mode 100644 clmctest/streaming/report.sh delete mode 100644 clmctest/streaming/rspec.yml delete mode 100644 clmctest/streaming/rules.json delete mode 100644 clmctest/streaming/run.sh delete mode 100644 clmctest/streaming/setupCLMC.sh delete mode 100644 clmctest/streaming/setupNGINX.sh delete mode 100644 clmctest/streaming/stop.sh delete mode 100644 clmctest/streaming/test_rspec.py delete mode 100644 clmctest/streaming/test_rule1.json delete mode 100644 clmctest/streaming/test_rule2.json delete mode 100644 clmctest/streaming/test_streaming.py delete mode 100644 src/clmcagent/__init__.py delete mode 100644 src/clmcagent/config_collector.py delete mode 100644 src/clmcagent/stop_systemctl_monitor.sh delete mode 100644 src/clmcagent/systemctl_monitor.py rename src/{clmcwebservice => service}/.coveragerc (100%) rename src/{clmcwebservice => service}/MANIFEST.in (100%) rename src/{clmcwebservice => service}/clmcservice/__init__.py (100%) rename src/{clmcwebservice => service}/clmcservice/aggregator.py (99%) rename src/{clmcwebservice => service}/clmcservice/tests.py (94%) rename src/{clmcwebservice => service}/clmcservice/utilities.py (100%) rename src/{clmcwebservice => service}/clmcservice/views.py (100%) rename src/{clmcwebservice => service}/development.ini (96%) rename src/{clmcwebservice => service}/production.ini (100%) rename src/{clmcwebservice => service}/pytest.ini (100%) rename src/{clmcwebservice => service}/setup.py (100%) rename src/{clmcwebservice => service}/tox.ini (100%) rename MANIFEST.in => src/test/MANIFEST.in (100%) rename {clmctest => src/test/clmctest}/__init__.py (100%) rename {clmctest => src/test/clmctest}/e2e_response_time/rspec.yml (100%) rename {clmctest => src/test/clmctest}/inputs/__init__.py (100%) rename {clmctest => src/test/clmctest}/inputs/conftest.py (100%) rename {clmctest => src/test/clmctest}/inputs/rspec.yml (100%) rename {clmctest => src/test/clmctest}/inputs/test_config_collector.py (100%) rename {clmctest => src/test/clmctest}/inputs/test_rspec.py (100%) rename {clmctest => src/test/clmctest}/inputs/test_systemctl_mon.py (100%) rename {clmctest => src/test/clmctest}/inputs/test_telegraf_agents.py (100%) rename {clmctest => src/test/clmctest}/monitoring/E2ESim.py (100%) rename {clmctest => src/test/clmctest}/monitoring/E2ETestAggregatorThread.py (97%) rename {clmctest => src/test/clmctest}/monitoring/LineProtocolGenerator.py (100%) rename {clmctest => src/test/clmctest}/monitoring/StreamingSim.py (100%) rename {clmctest => src/test/clmctest}/monitoring/__init__.py (100%) rename {clmctest => src/test/clmctest}/monitoring/conftest.py (100%) rename {clmctest => src/test/clmctest}/monitoring/rspec.yml (100%) rename {clmctest => src/test/clmctest}/monitoring/test_e2eresults.py (94%) rename {clmctest => src/test/clmctest}/monitoring/test_rspec.py (100%) rename {clmctest => 
src/test/clmctest}/monitoring/test_simresults.py (100%) rename {clmctest => src/test/clmctest}/scripts/__init__.py (100%) rename {clmctest => src/test/clmctest}/scripts/rspec.yml (100%) rename {clmctest => src/test/clmctest}/scripts/test_config_telegraf.py (94%) rename {clmctest => src/test/clmctest}/services/apache/install.sh (100%) mode change 100755 => 100644 rename {clmctest => src/test/clmctest}/services/apache/telegraf_apache.conf (100%) rename {clmctest => src/test/clmctest}/services/ffmpeg/install.sh (100%) mode change 100755 => 100644 rename {clmctest => src/test/clmctest}/services/ffmpeg/telegraf_ffmpeg.conf (100%) rename {clmctest => src/test/clmctest}/services/ffmpeg/transcode.sh (100%) mode change 100755 => 100644 rename {clmctest => src/test/clmctest}/services/host/install.sh (100%) mode change 100755 => 100644 rename {clmctest => src/test/clmctest}/services/host/telegraf_host.conf (100%) rename {clmctest => src/test/clmctest}/services/ipendpoint/install.sh (100%) mode change 100755 => 100644 rename {clmctest => src/test/clmctest}/services/ipendpoint/telegraf_ipendpoint.conf (100%) rename {clmctest => src/test/clmctest}/services/loadtest-streaming/install.sh (100%) mode change 100755 => 100644 rename {clmctest => src/test/clmctest}/services/loadtest-streaming/telegraf_loadtest_streaming.conf (100%) rename {clmctest => src/test/clmctest}/services/minio/install.sh (100%) rename {clmctest => src/test/clmctest}/services/minio/minio.conf (100%) rename {clmctest => src/test/clmctest}/services/minio/telegraf_minio.conf (100%) rename {clmctest => src/test/clmctest}/services/mongo/install.sh (100%) mode change 100755 => 100644 rename {clmctest => src/test/clmctest}/services/mongo/telegraf_mongo.conf (100%) rename {clmctest => src/test/clmctest}/services/nginx/install.sh (100%) mode change 100755 => 100644 rename {clmctest => src/test/clmctest}/services/nginx/nginx.conf (100%) rename {clmctest => src/test/clmctest}/services/nginx/telegraf_nginx.conf (100%) rename {clmctest => src/test/clmctest}/services/pytest/install.sh (100%) mode change 100755 => 100644 rename {clmctest => src/test/clmctest}/services/vlc/install.sh (100%) mode change 100755 => 100644 rename setup.py => src/test/setup.py (97%) diff --git a/.gitignore b/.gitignore index cb69709..45e9d09 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,4 @@ ubuntu-xenial-16.04-cloudimg-console.log .tox *$py.class **/.pytest_cache/ +build/ diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 801d2d7..8ac8b20 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -29,10 +29,14 @@ build:tests: only: - schedules script: - - python setup.py sdist --dist-dir=build + - cd $CI_PROJECT_DIR/src/test + - python setup.py sdist --dist-dir=$CI_PROJECT_DIR/build + - cd $CI_PROJECT_DIR/src/service + - python setup.py sdist --dist-dir=$CI_PROJECT_DIR/build artifacts: paths: - - build/clmctest-SNAPSHOT.tar.gz + - $CI_PROJECT_DIR/build/clmctest-SNAPSHOT.tar.gz + - $CI_PROJECT_DIR/build/clmcservice-SNAPSHOT.tar.gz expire_in: 1 day test:all: @@ -47,6 +51,8 @@ test:all: - vagrant --fixture=scripts -- ssh test-runner -- -tt "pytest -s --pyargs clmctest.scripts" - vagrant --fixture=monitoring -- up - vagrant --fixture=monitoring -- ssh test-runner -- -tt "pytest -s --pyargs clmctest.monitoring" + - vagrant --fixture=monitoring -- ssh test-runner -- -tt "pip3 install /vagrant/build/clmcservice-SNAPSHOT.tar.gz" + - vagrant --fixture=monitoring -- ssh test-runner -- -tt "pytest -s --pyargs clmcservice.tests" - vagrant --fixture=inputs -- up - vagrant 
--fixture=inputs -- ssh test-runner -- -tt "pytest -s --pyargs clmctest.inputs" when: on_success diff --git a/Vagrantfile b/Vagrantfile index 8fbb539..231a03d 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -48,8 +48,8 @@ end # load custom config file puts "loading custom infrastructure configuration: #{fixture}" -puts "custom config file: /clmctest/#{fixture}/rspec.yml" -host_rspec_file = "clmctest/#{fixture}/rspec.yml" +puts "custom config file: /src/test/clmctest/#{fixture}/rspec.yml" +host_rspec_file = "src/test/clmctest/#{fixture}/rspec.yml" hosts = YAML.load_file(host_rspec_file) # Start creating VMS using xenial64 as the base box @@ -89,12 +89,12 @@ Vagrant.configure("2") do |config| puts "Instance name #{instance_name}:" case instance_name when 'test-runner' - instance_config.vm.provision :shell, :path => "clmctest/services/pytest/install.sh" + instance_config.vm.provision :shell, :path => "src/test/clmctest/services/pytest/install.sh" when 'clmc-service' - instance_config.vm.provision :shell, :path => "scripts/clmc-service/install.sh", :args => "#{host["influxdb_url"]} #{host["database_name"]} #{host["report_period"]}" + instance_config.vm.provision :shell, :path => "scripts/clmc-service/install.sh", :args => "#{host["influxdb_url"]} #{host["database_name"]} #{host["report_period"]}", env: {"REPO_ROOT" => "/vagrant"} else # specific service install - instance_config.vm.provision :shell, :path => "clmctest/services/#{host["service_name"]}/install.sh", env: {"REPO_ROOT" => "/vagrant"} + instance_config.vm.provision :shell, :path => "src/test/clmctest/services/#{host["service_name"]}/install.sh", env: {"REPO_ROOT" => "/vagrant"} # CLMC agent install instance_config.vm.provision "file", source: "reporc", destination: "/vagrant/reporc" @@ -107,7 +107,7 @@ Vagrant.configure("2") do |config| cp /vagrant/scripts/clmc-agent/telegraf_output.conf /etc/telegraf/telegraf.d/ - cp /vagrant/clmctest/services/#{host["service_name"]}/telegraf_#{host["service_name"]}.conf /etc/telegraf/telegraf.d/ + cp /vagrant/src/test/clmctest/services/#{host["service_name"]}/telegraf_#{host["service_name"]}.conf /etc/telegraf/telegraf.d/ SHELL diff --git a/clmctest/streaming/__init__.py b/clmctest/streaming/__init__.py deleted file mode 100644 index 44f7725..0000000 --- a/clmctest/streaming/__init__.py +++ /dev/null @@ -1 +0,0 @@ -#!/usr/bin/python3 \ No newline at end of file diff --git a/clmctest/streaming/conftest.py b/clmctest/streaming/conftest.py deleted file mode 100644 index 1eb9a2d..0000000 --- a/clmctest/streaming/conftest.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/python3 -""" -## © University of Southampton IT Innovation Centre, 2018 -## -## Copyright in this software belongs to University of Southampton -## IT Innovation Centre of Gamma House, Enterprise Road, -## Chilworth Science Park, Southampton, SO16 7NS, UK. -## -## This software may not be used, sold, licensed, transferred, copied -## or reproduced in whole or in part in any manner or form or in or -## on any media by any person other than in accordance with the terms -## of the Licence Agreement supplied with the software, or otherwise -## without the prior written consent of the copyright owners. -## -## This software is distributed WITHOUT ANY WARRANTY, without even the -## implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -## PURPOSE, except where stated in the Licence Agreement supplied with -## the software. 
-## -## Created By : Michael Boniface -## Created Date : 25-02-2018 -## Created for Project : FLAME -""" - -import pytest -import yaml -import requests -import time -import pkg_resources - - -@pytest.fixture(scope="module") -def streaming_config(): - """ - Reads the service configuration deployed for the streaming simulation test. - - :param request: access the parameters of the fixture - :return: the python object representing the read YAML file - """ - rspec = pkg_resources.resource_filename('clmctest.streaming', 'rspec.yml') - print("rspec file: {0}".format(rspec)) - - with open(rspec, 'r') as stream: - data_loaded = yaml.load(stream) - return data_loaded - - -@pytest.fixture(scope="module", autouse=True, - params=[{'config': {'kapacitor_url': 'http://localhost:8888/chronograf/v1/sources/1/kapacitors', 'kapacitor_file': '/vagrant/test/streaming/kapacitor.json'}}]) -def kapacitor_config(request): - - kapacitor_configuration = request.param['config']['kapacitor_file'] - with open(kapacitor_configuration, "r") as rule_file: - data = "".join(line.strip() for line in rule_file.readlines()) - - kapacitor_url = request.param['config']['kapacitor_url'] - requests.post(url=kapacitor_url, data=data, headers={"Content-Type": "application/json"}) - time.sleep(1) diff --git a/clmctest/streaming/dashboard.json b/clmctest/streaming/dashboard.json deleted file mode 100644 index 52e7384..0000000 --- a/clmctest/streaming/dashboard.json +++ /dev/null @@ -1 +0,0 @@ -{"id":1,"cells":[{"i":"396b0b14-1482-4b8a-a359-f144541170a4","x":6,"y":8,"w":6,"h":4,"name":"AdaptiveStreaming_SF_NetworkBytesSentPerSecond","queries":[{"query":"SELECT derivative(mean(\"bytes_sent\"), 1s) AS \"bytes_sent_per_second\" FROM \"CLMCMetrics\".\"autogen\".\"net\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx2' GROUP BY :interval:","queryConfig":{"database":"","measurement":"","retentionPolicy":"","fields":[],"tags":{},"groupBy":{"time":"","tags":[]},"areTagsAccepted":false,"rawText":"SELECT derivative(mean(\"bytes_sent\"), 1s) AS \"bytes_sent_per_second\" FROM \"CLMCMetrics\".\"autogen\".\"net\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx2' GROUP BY :interval:","range":null}}],"type":"line","links":{"self":"/chronograf/v1/dashboards/1/cells/396b0b14-1482-4b8a-a359-f144541170a4"}},{"i":"480b4037-a816-4e1c-8c84-edb39b0c1f6d","x":0,"y":8,"w":6,"h":4,"name":"AdapativeStreaming_SF_NetworkBytesSentPerSecond","queries":[{"query":"SELECT derivative(mean(\"bytes_sent\"), 1s) AS \"bytes_sent_per_second\" FROM \"CLMCMetrics\".\"autogen\".\"net\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx1' GROUP BY :interval:","queryConfig":{"database":"","measurement":"","retentionPolicy":"","fields":[],"tags":{},"groupBy":{"time":"","tags":[]},"areTagsAccepted":false,"rawText":"SELECT derivative(mean(\"bytes_sent\"), 1s) AS \"bytes_sent_per_second\" FROM \"CLMCMetrics\".\"autogen\".\"net\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx1' GROUP BY :interval:","range":null}}],"type":"line","links":{"self":"/chronograf/v1/dashboards/1/cells/480b4037-a816-4e1c-8c84-edb39b0c1f6d"}},{"i":"6ad170aa-c5f2-4930-a604-1e88579dffee","x":6,"y":4,"w":6,"h":4,"name":"AdaptiveStreaming_SF2_CPU","queries":[{"query":"SELECT 100-mean(\"usage_idle\") AS \"mean_usage_idle\" FROM \"CLMCMetrics\".\"autogen\".\"cpu\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx2' GROUP BY 
:interval:","queryConfig":{"database":"","measurement":"","retentionPolicy":"","fields":[],"tags":{},"groupBy":{"time":"","tags":[]},"areTagsAccepted":false,"rawText":"SELECT 100-mean(\"usage_idle\") AS \"mean_usage_idle\" FROM \"CLMCMetrics\".\"autogen\".\"cpu\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx2' GROUP BY :interval:","range":null}}],"type":"line","links":{"self":"/chronograf/v1/dashboards/1/cells/6ad170aa-c5f2-4930-a604-1e88579dffee"}},{"i":"7e424259-32b8-40be-aa53-477aaf801f0e","x":0,"y":4,"w":6,"h":4,"name":"AdaptiveStreaming_SF1_CPU","queries":[{"query":"SELECT 100-mean(\"usage_idle\") AS \"mean_usage_idle\" FROM \"CLMCMetrics\".\"autogen\".\"cpu\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx1' GROUP BY :interval:","queryConfig":{"database":"","measurement":"","retentionPolicy":"","fields":[],"tags":{},"groupBy":{"time":"","tags":[]},"areTagsAccepted":false,"rawText":"SELECT 100-mean(\"usage_idle\") AS \"mean_usage_idle\" FROM \"CLMCMetrics\".\"autogen\".\"cpu\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx1' GROUP BY :interval:","range":null}}],"type":"line","links":{"self":"/chronograf/v1/dashboards/1/cells/7e424259-32b8-40be-aa53-477aaf801f0e"}},{"i":"a095c820-8bac-45fe-974d-4030e1bb8770","x":6,"y":0,"w":6,"h":4,"name":"AdaptiveStreaming_SF2_ActiveConnections","queries":[{"query":"SELECT mean(\"active\") AS \"mean_active\" FROM \"CLMCMetrics\".\"autogen\".\"nginx\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx2' GROUP BY :interval:","label":"nginx.active","queryConfig":{"database":"CLMCMetrics","measurement":"nginx","retentionPolicy":"autogen","fields":[{"field":"active","funcs":["mean"]}],"tags":{"ipendpoint":["adaptive_streaming_I1_nginx2"]},"groupBy":{"time":"auto","tags":[]},"areTagsAccepted":true,"rawText":null,"range":null}}],"type":"line","links":{"self":"/chronograf/v1/dashboards/1/cells/a095c820-8bac-45fe-974d-4030e1bb8770"}},{"i":"63a7e85a-b411-46be-9478-8479405379a3","x":0,"y":0,"w":6,"h":4,"name":"AdaptiveStreaming_SF1_ActiveConnections","queries":[{"query":"SELECT mean(\"active\") AS \"mean_active\" FROM \"CLMCMetrics\".\"autogen\".\"nginx\" WHERE time \u003e :dashboardTime: AND \"ipendpoint\"='adaptive_streaming_I1_nginx1' GROUP BY :interval:","label":"nginx.active","queryConfig":{"database":"CLMCMetrics","measurement":"nginx","retentionPolicy":"autogen","fields":[{"field":"active","funcs":["mean"]}],"tags":{"ipendpoint":["adaptive_streaming_I1_nginx1"]},"groupBy":{"time":"auto","tags":[]},"areTagsAccepted":true,"rawText":null,"range":null}}],"type":"line","links":{"self":"/chronograf/v1/dashboards/1/cells/63a7e85a-b411-46be-9478-8479405379a3"}}],"templates":[],"name":"Adaptive Streaming Experiment Dashboard","links":{"self":"/chronograf/v1/dashboards/1","cells":"/chronograf/v1/dashboards/1/cells","templates":"/chronograf/v1/dashboards/1/templates"}} diff --git a/clmctest/streaming/influx.json b/clmctest/streaming/influx.json deleted file mode 100644 index 34bb14a..0000000 --- a/clmctest/streaming/influx.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "id": "1", - "name": "Influx 1", - "url": "http://localhost:8086", - "default": true, - "telegraf": "CLMCMetrics" -} \ No newline at end of file diff --git a/clmctest/streaming/kapacitor.conf b/clmctest/streaming/kapacitor.conf deleted file mode 100644 index 7dfb797..0000000 --- a/clmctest/streaming/kapacitor.conf +++ /dev/null @@ -1,720 +0,0 @@ -## © University of 
Southampton IT Innovation Centre, 2018 -## -## Copyright in this software belongs to University of Southampton -## IT Innovation Centre of Gamma House, Enterprise Road, -## Chilworth Science Park, Southampton, SO16 7NS, UK. -## -## This software may not be used, sold, licensed, transferred, copied -## or reproduced in whole or in part in any manner or form or in or -## on any media by any person other than in accordance with the terms -## of the Licence Agreement supplied with the software, or otherwise -## without the prior written consent of the copyright owners. -## -## This software is distributed WITHOUT ANY WARRANTY, without even the -## implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -## PURPOSE, except where stated in the Licence Agreement supplied with -## the software. -## -## Created By : Simon Crowle -## Created Date : 15-02-2018 -## Created for Project : FLAME - -# The hostname of this node. -# Must be resolvable by any configured InfluxDB hosts. -hostname = "localhost" -# Directory for storing a small amount of metadata about the server. -data_dir = "/var/lib/kapacitor" - -# Do not apply configuration overrides during startup. -# Useful if the configuration overrides cause Kapacitor to fail startup. -# This option is intended as a safe guard and should not be needed in practice. -skip-config-overrides = false - -# Default retention-policy, if a write is made to Kapacitor and -# it does not have a retention policy associated with it, -# then the retention policy will be set to this value -default-retention-policy = "" - -[http] - # HTTP API Server for Kapacitor - # This server is always on, - # it serves both as a write endpoint - # and as the API endpoint for all other - # Kapacitor calls. - bind-address = ":9092" - log-enabled = true - write-tracing = false - pprof-enabled = false - https-enabled = false - https-certificate = "/etc/ssl/kapacitor.pem" - -[config-override] - # Enable/Disable the service for overridding configuration via the HTTP API. - enabled = true - -[logging] - # Destination for logs - # Can be a path to a file or 'STDOUT', 'STDERR'. - file = "/var/log/kapacitor/kapacitor.log" - # Logging level can be one of: - # DEBUG, INFO, ERROR - # HTTP logging can be disabled in the [http] config section. - level = "INFO" - -[load] - # Enable/Disable the service for loading tasks/templates/handlers - # from a directory - enabled = true - # Directory where task/template/handler files are set - dir = "/etc/kapacitor/load" - - -[replay] - # Where to store replay files, aka recordings. - dir = "/var/lib/kapacitor/replay" - -[task] - # Where to store the tasks database - # DEPRECATED: This option is not needed for new installations. - # It is only used to determine the location of the task.db file - # for migrating to the new `storage` service. - dir = "/var/lib/kapacitor/tasks" - # How often to snapshot running task state. - snapshot-interval = "60s" - -[storage] - # Where to store the Kapacitor boltdb database - boltdb = "/var/lib/kapacitor/kapacitor.db" - -[deadman] - # Configure a deadman's switch - # Globally configure deadman's switches on all tasks. - # NOTE: for this to be of use you must also globally configure at least one alerting method. - global = false - # Threshold, if globally configured the alert will be triggered if the throughput in points/interval is <= threshold. - threshold = 0.0 - # Interval, if globally configured the frequency at which to check the throughput. 
- interval = "10s" - # Id -- the alert Id, NODE_NAME will be replaced with the name of the node being monitored. - id = "node 'NODE_NAME' in task '{{ .TaskName }}'" - # The message of the alert. INTERVAL will be replaced by the interval. - message = "{{ .ID }} is {{ if eq .Level \"OK\" }}alive{{ else }}dead{{ end }}: {{ index .Fields \"collected\" | printf \"%0.3f\" }} points/INTERVAL." - - -# Multiple InfluxDB configurations can be defined. -# Exactly one must be marked as the default. -# Each one will be given a name and can be referenced in batch queries and InfluxDBOut nodes. -[[influxdb]] - # Connect to an InfluxDB cluster - # Kapacitor can subscribe, query and write to this cluster. - # Using InfluxDB is not required and can be disabled. - enabled = true - default = true - name = "localhost" - urls = ["http://localhost:8086"] - username = "" - password = "" - timeout = 0 - # Absolute path to pem encoded CA file. - # A CA can be provided without a key/cert pair - # ssl-ca = "/etc/kapacitor/ca.pem" - # Absolutes paths to pem encoded key and cert files. - # ssl-cert = "/etc/kapacitor/cert.pem" - # ssl-key = "/etc/kapacitor/key.pem" - - # Do not verify the TLS/SSL certificate. - # This is insecure. - insecure-skip-verify = false - - # Maximum time to try and connect to InfluxDB during startup - startup-timeout = "5m" - - # Turn off all subscriptions - disable-subscriptions = false - - # Subscription mode is either "cluster" or "server" - subscription-mode = "cluster" - - # Which protocol to use for subscriptions - # one of 'udp', 'http', or 'https'. - subscription-protocol = "http" - - # Subscriptions resync time interval - # Useful if you want to subscribe to new created databases - # without restart Kapacitord - subscriptions-sync-interval = "1m0s" - - # Override the global hostname option for this InfluxDB cluster. - # Useful if the InfluxDB cluster is in a separate network and - # needs special config to connect back to this Kapacitor instance. - # Defaults to `hostname` if empty. - kapacitor-hostname = "" - - # Override the global http port option for this InfluxDB cluster. - # Useful if the InfluxDB cluster is in a separate network and - # needs special config to connect back to this Kapacitor instance. - # Defaults to the port from `[http] bind-address` if 0. - http-port = 0 - - # Host part of a bind address for UDP listeners. - # For example if a UDP listener is using port 1234 - # and `udp-bind = "hostname_or_ip"`, - # then the UDP port will be bound to `hostname_or_ip:1234` - # The default empty value will bind to all addresses. - udp-bind = "" - # Subscriptions use the UDP network protocl. - # The following options of for the created UDP listeners for each subscription. - # Number of packets to buffer when reading packets off the socket. - udp-buffer = 1000 - # The size in bytes of the OS read buffer for the UDP socket. - # A value of 0 indicates use the OS default. - udp-read-buffer = 0 - - [influxdb.subscriptions] - # Set of databases and retention policies to subscribe to. - # If empty will subscribe to all, minus the list in - # influxdb.excluded-subscriptions - # - # Format - # db_name = <list of retention policies> - # - # Example: - # my_database = [ "default", "longterm" ] - [influxdb.excluded-subscriptions] - # Set of databases and retention policies to exclude from the subscriptions. - # If influxdb.subscriptions is empty it will subscribe to all - # except databases listed here. 
- # - # Format - # db_name = <list of retention policies> - # - # Example: - # my_database = [ "default", "longterm" ] - -[kubernetes] - # Enable/Disable the kubernetes service. - # Needed by the k8sAutoscale TICKscript node. - enabled = false - # There are several ways to connect to the kubernetes API servers: - # - # Via the proxy, start the proxy via the `kubectl proxy` command: - # api-servers = ["http://localhost:8001"] - # - # From within the cluster itself, in which case - # kubernetes secrets and DNS services are used - # to determine the needed configuration. - # in-cluster = true - # - # Direct connection, in which case you need to know - # the URL of the API servers, the authentication token and - # the path to the ca cert bundle. - # These value can be found using the `kubectl config view` command. - # api-servers = ["http://192.168.99.100:8443"] - # token = "..." - # ca-path = "/path/to/kubernetes/ca.crt" - # - # Kubernetes can also serve as a discoverer for scrape targets. - # In that case the type of resources to discoverer must be specified. - # Valid values are: "node", "pod", "service", and "endpoint". - # resource = "pod" - - - -[smtp] - # Configure an SMTP email server - # Will use TLS and authentication if possible - # Only necessary for sending emails from alerts. - enabled = false - host = "localhost" - port = 25 - username = "" - password = "" - # From address for outgoing mail - from = "" - # List of default To addresses. - # to = ["oncall@example.com"] - - # Skip TLS certificate verify when connecting to SMTP server - no-verify = false - # Close idle connections after timeout - idle-timeout = "30s" - - # If true the all alerts will be sent via Email - # without explicitly marking them in the TICKscript. - global = false - # Only applies if global is true. - # Sets all alerts in state-changes-only mode, - # meaning alerts will only be sent if the alert state changes. - state-changes-only = false - -[snmptrap] - # Configure an SNMP trap server - enabled = false - # The host:port address of the SNMP trap server - addr = "localhost:162" - # The community to use for traps - community = "kapacitor" - # Number of retries when sending traps - retries = 1 - - -[opsgenie] - # Configure OpsGenie with your API key and default routing key. - enabled = false - # Your OpsGenie API Key. - api-key = "" - # Default OpsGenie teams, can be overridden per alert. - # teams = ["team1", "team2"] - # Default OpsGenie recipients, can be overridden per alert. - # recipients = ["recipient1", "recipient2"] - # The OpsGenie API URL should not need to be changed. - url = "https://api.opsgenie.com/v1/json/alert" - # The OpsGenie Recovery URL, you can change this - # based on which behavior you want a recovery to - # trigger (Add Notes, Close Alert, etc.) - recovery_url = "https://api.opsgenie.com/v1/json/alert/note" - # If true then all alerts will be sent to OpsGenie - # without explicitly marking them in the TICKscript. - # The team and recipients can still be overridden. - global = false - -[victorops] - # Configure VictorOps with your API key and default routing key. - enabled = false - # Your VictorOps API Key. - api-key = "" - # Default VictorOps routing key, can be overridden per alert. - routing-key = "" - # The VictorOps API URL should not need to be changed. - url = "https://alert.victorops.com/integrations/generic/20131114/alert" - # If true the all alerts will be sent to VictorOps - # without explicitly marking them in the TICKscript. - # The routing key can still be overridden. 
- global = false - # Use JSON for the "data" field - # New installations will want to set this to true as it makes - # the data that triggered the alert available within VictorOps. - # The default is "false" for backwards compatibility reasons. - # json-data = false - -[pagerduty] - # Configure PagerDuty. - enabled = false - # Your PagerDuty Service Key. - service-key = "" - # The PagerDuty API URL should not need to be changed. - url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" - # If true the all alerts will be sent to PagerDuty - # without explicitly marking them in the TICKscript. - global = false - -[pushover] - # Configure Pushover. - enabled = false - # Your Pushover API token. - token = "" - # Your Pushover USER_TOKEN. - user-key = "" - # The URL for the Pushover API. - url = "https://api.pushover.net/1/messages.json" - -########################################## -# Configure Alert POST request Endpoints - -# As ENV variables: -# KAPACITOR_HTTPPOST_0_ENDPOINT = "example" -# KAPACITOR_HTTPPOST_0_URL = "http://example.com" -# KAPACITOR_HTTPPOST_0_HEADERS_Example = "header" - -# [[httppost]] -# endpoint = "example" -# url = "http://example.com" -# headers = { Example = "your-key" } -# basic-auth = { username = "my-user", password = "my-pass" } -# -# # Provide an alert template for constructing a custom HTTP body. -# # Alert templates are only used with post alert handlers as they consume alert data. -# # The template uses https://golang.org/pkg/text/template/ and has access to the following fields: -# # * .ID - The unique ID for this alert -# # * .Message - The message of the alert -# # * .Details - The details of the alert -# # * .Time - The time the alert event occurred -# # * .Duration - The duration of the alert event. -# # * .Level - The level of the alert, i.e INFO, WARN, or CRITICAL. -# # * .Data - The data that triggered the alert. -# # -# # Specify the template inline. -# alert-template = "{{.Message}}:{{range .Data.Series}}{{.Tags}},{{range .Values}}{{.}}{{end}}{{end}}" -# # Specify an absolute path to a template file. -# alert-template-file = "/path/to/template/file" -# -# # Provide a row template for constructing a custom HTTP body. -# # Row templates are only used with httpPost pipeline nodes as they consume a row at a time. -# # The template uses https://golang.org/pkg/text/template/ and has access to the following fields: -# # * .Name - The measurement name of the data stream -# # * .Tags - A map of tags on the data. -# # * .Values - A list of values, each entry is a map containing a "time" key for the time of the point -# # and keys for all other fields on the point. -# # -# # Specify the template inline. -# row-template = "{{.Name}} host={{index .Tags \"host\"}}{{range .Values}} {{index . "time"}} {{index . "value"}}{{end}}" -# # Specify an absolute path to a template file. -# row-template-file = "/path/to/template/file" - -[slack] - # Configure Slack. - enabled = true - # The Slack webhook URL, can be obtained by adding - # an Incoming Webhook integration. - # Visit https://slack.com/services/new/incoming-webhook - # to add new webhook for Kapacitor. - url = "https://hooks.slack.com/services/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" - # Default channel for messages - channel = "#clmc" - # If true all the alerts will be sent to Slack - # without explicitly marking them in the TICKscript. - global = false - # Only applies if global is true. 
- # Sets all alerts in state-changes-only mode, - # meaning alerts will only be sent if the alert state changes. - state-changes-only = false - -[telegram] - # Configure Telegram. - enabled = false - # The Telegram Bot URL should not need to be changed. - url = "https://api.telegram.org/bot" - # Telegram Bot Token, can be obtained From @BotFather. - token = "" - # Default recipient for messages, Contact @myidbot on Telegram to get an ID. - chat-id = "" - # Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your alert message. - #parse-mode = "Markdown" - # Disable link previews for links in this message - disable-web-page-preview = false - # Sends the message silently. iOS users will not receive a notification, Android users will receive a notification with no sound. - disable-notification = false - # If true the all alerts will be sent to Telegram - # without explicitly marking them in the TICKscript. - global = false - # Only applies if global is true. - # Sets all alerts in state-changes-only mode, - # meaning alerts will only be sent if the alert state changes. - state-changes-only = false - -[hipchat] - # Configure HipChat. - enabled = false - # The HipChat API URL. Replace subdomain with your - # HipChat subdomain. - # url = "https://subdomain.hipchat.com/v2/room" - # Visit https://www.hipchat.com/docs/apiv2 - # for information on obtaining your room id and - # authentication token. - # Default room for messages - room = "" - # Default authentication token - token = "" - # If true then all alerts will be sent to HipChat - # without explicitly marking them in the TICKscript. - global = false - # Only applies if global is true. - # Sets all alerts in state-changes-only mode, - # meaning alerts will only be sent if the alert state changes. - state-changes-only = false - -[alerta] - # Configure Alerta. - enabled = false - # The Alerta URL. - url = "" - # Default authentication token. - token = "" - # Default token prefix - # If you are on older versions of alerta you may need to change this to "Key" - token-prefix = "Bearer" - # Default environment. - environment = "" - # Default origin. - origin = "kapacitor" - -[sensu] - # Configure Sensu. - enabled = false - # The Sensu Client host:port address. - addr = "sensu-client:3030" - # Default JIT source. - source = "Kapacitor" - -[reporting] - # Send usage statistics - # every 12 hours to Enterprise. - enabled = true - url = "https://usage.influxdata.com" - -[stats] - # Emit internal statistics about Kapacitor. - # To consume these stats create a stream task - # that selects data from the configured database - # and retention policy. - # - # Example: - # stream|from().database('_kapacitor').retentionPolicy('autogen')... - # - enabled = true - stats-interval = "10s" - database = "_kapacitor" - retention-policy= "autogen" - -[udf] -# Configuration for UDFs (User Defined Functions) -[udf.functions] - # Example go UDF. - # First compile example: - # go build -o avg_udf ./udf/agent/examples/moving_avg.go - # - # Use in TICKscript like: - # stream.goavg() - # .field('value') - # .size(10) - # .as('m_average') - # - # uncomment to enable - #[udf.functions.goavg] - # prog = "./avg_udf" - # args = [] - # timeout = "10s" - - # Example python UDF. 
- # Use in TICKscript like: - # stream.pyavg() - # .field('value') - # .size(10) - # .as('m_average') - # - # uncomment to enable - #[udf.functions.pyavg] - # prog = "/usr/bin/python2" - # args = ["-u", "./udf/agent/examples/moving_avg.py"] - # timeout = "10s" - # [udf.functions.pyavg.env] - # PYTHONPATH = "./udf/agent/py" - - # Example UDF over a socket - #[udf.functions.myCustomUDF] - # socket = "/path/to/socket" - # timeout = "10s" - -[talk] - # Configure Talk. - enabled = false - # The Talk webhook URL. - url = "https://jianliao.com/v2/services/webhook/uuid" - # The default authorName. - author_name = "Kapacitor" - -# MQTT client configuration. -# Mutliple different clients may be configured by -# repeating [[mqtt]] sections. -[[mqtt]] - enabled = false - # Unique name for this broker configuration - name = "localhost" - # Whether this broker configuration is the default - default = true - # URL of the MQTT broker. - # Possible protocols include: - # tcp - Raw TCP network connection - # ssl - TLS protected TCP network connection - # ws - Websocket network connection - url = "tcp://localhost:1883" - - # TLS/SSL configuration - # A CA can be provided without a key/cert pair - # ssl-ca = "/etc/kapacitor/ca.pem" - # Absolutes paths to pem encoded key and cert files. - # ssl-cert = "/etc/kapacitor/cert.pem" - # ssl-key = "/etc/kapacitor/key.pem" - - # Unique ID for this MQTT client. - # If empty used the value of "name" - client-id = "" - - # Username - username = "" - # Password - password = "" - -[[swarm]] - # Enable/Disable the Docker Swarm service. - # Needed by the swarmAutoscale TICKscript node. - enabled = false - # Unique ID for this Swarm cluster - # NOTE: This is not the ID generated by Swarm rather a user defined - # ID for this cluster since Kapacitor can communicate with multiple clusters. - id = "" - # List of URLs for Docker Swarm servers. - servers = ["http://localhost:2376"] - # TLS/SSL Configuration for connecting to secured Docker daemons - ssl-ca = "" - ssl-cert = "" - ssl-key = "" - insecure-skip-verify = false - -################################## -# Input Methods, same as InfluxDB -# - -[collectd] - enabled = false - bind-address = ":25826" - database = "collectd" - retention-policy = "" - batch-size = 1000 - batch-pending = 5 - batch-timeout = "10s" - typesdb = "/usr/share/collectd/types.db" - -[opentsdb] - enabled = false - bind-address = ":4242" - database = "opentsdb" - retention-policy = "" - consistency-level = "one" - tls-enabled = false - certificate = "/etc/ssl/influxdb.pem" - batch-size = 1000 - batch-pending = 5 - batch-timeout = "1s" - -# Service Discovery and metric scraping - -[[scraper]] - enabled = false - name = "myscraper" - # Specify the id of a discoverer service specified below - discoverer-id = "" - # Specify the type of discoverer service being used. 
- discoverer-service = "" - db = "prometheus_raw" - rp = "autogen" - type = "prometheus" - scheme = "http" - metrics-path = "/metrics" - scrape-interval = "1m0s" - scrape-timeout = "10s" - username = "" - password = "" - bearer-token = "" - ssl-ca = "" - ssl-cert = "" - ssl-key = "" - ssl-server-name = "" - insecure-skip-verify = false - -# Supported discovery services - -[[azure]] - enabled = false - id = "myazure" - port = 80 - subscription-id = "" - tenant-id = "" - client-id = "" - client-secret = "" - refresh-interval = "5m0s" - -[[consul]] - enabled = false - id = "myconsul" - address = "127.0.0.1:8500" - token = "" - datacenter = "" - tag-separator = "," - scheme = "http" - username = "" - password = "" - ssl-ca = "" - ssl-cert = "" - ssl-key = "" - ssl-server-name = "" - insecure-skip-verify = false - -[[dns]] - enabled = false - id = "mydns" - refresh-interval = "30s" - ## Type can be SRV, A, or AAAA - type = "SRV" - ## Port is the port to scrape for records returned by A or AAAA types - port = 80 - -[[ec2]] - enabled = false - id = "myec2" - region = "us-east-1" - access-key = "" - secret-key = "" - profile = "" - refresh-interval = "1m0s" - port = 80 - -[[file-discovery]] - enabled = false - id = "myfile" - refresh-interval = "5m0s" - files = [] - -[[gce]] - enabled = false - id = "mygce" - project = "" - zone = "" - filter = "" - refresh-interval = "1m0s" - port = 80 - tag-separator = "," - -[[marathon]] - enabled = false - id = "mymarathon" - timeout = "30s" - refresh-interval = "30s" - bearer-token = "" - ssl-ca = "" - ssl-cert = "" - ssl-key = "" - ssl-server-name = "" - insecure-skip-verify = false - -[[nerve]] - enabled = false - id = "mynerve" - timeout = "10s" - -[[serverset]] - enabled = false - id = "myserverset" - timeout = "10s" - -[[static-discovery]] - enabled = false - id = "mystatic" - targets = ["localhost:9100"] - [static.labels] - region = "us-east-1" - -[[triton]] - enabled = false - id = "mytriton" - account = "" - dns-suffix = "" - endpoint = "" - port = 9163 - refresh-interval = "1m0s" - version = 1 - ssl-ca = "" - ssl-cert = "" - ssl-key = "" - ssl-server-name = "" - insecure-skip-verify = false diff --git a/clmctest/streaming/kapacitor.json b/clmctest/streaming/kapacitor.json deleted file mode 100644 index 6011886..0000000 --- a/clmctest/streaming/kapacitor.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "id": "1", - "name": "CLMCKapacitor", - "url": "http://localhost:9092", - "active": false -} \ No newline at end of file diff --git a/clmctest/streaming/manual.md b/clmctest/streaming/manual.md deleted file mode 100644 index 7db0fc7..0000000 --- a/clmctest/streaming/manual.md +++ /dev/null @@ -1,146 +0,0 @@ -<!-- -// © University of Southampton IT Innovation Centre, 2017 -// -// Copyright in this software belongs to University of Southampton -// IT Innovation Centre of Gamma House, Enterprise Road, -// Chilworth Science Park, Southampton, SO16 7NS, UK. -// -// This software may not be used, sold, licensed, transferred, copied -// or reproduced in whole or in part in any manner or form or in or -// on any media by any person other than in accordance with the terms -// of the Licence Agreement supplied with the software, or otherwise -// without the prior written consent of the copyright owners. -// -// This software is distributed WITHOUT ANY WARRANTY, without even the -// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -// PURPOSE, except where stated in the Licence Agreement supplied with -// the software. 
-// -// Created By : Michael Boniface -// Updated By : Simon Crowle -// Created Date : 18-12-2017 -// Update Date : 14-02-2018 -// Created for Project : FLAME ---> - -# CLMC Adaptive Streaming Test - -This test streams mpeg-dash video using the two nginx servers monitored by Telegraf configured with a default apache plugin and a net_response plugin. The data is stored in the `clmc-service` using database `CLMCMetrics` and measurements `nginx` and `net_response`. - -The following command brings up the services: - -`vagrant --fixture=streaming up` - -* clmc-service: configured with influx, kapacitor, chronograf -* nginx1@DC1, nginx2@DC2: configured with nginx and a test video located at http://192.168.50.11:80/test_video/stream.mpd on the internal vbox network and at http://localhost:8081/test_video/stream.mpd if accessing from the host machine - -### Run the test set-up - -`vagrant --fixture=streaming ssh clmc-service -- "sudo /vagrant/test/streaming/setupCLMC.sh /vagrant/test/streaming"` -`vagrant --fixture=streaming ssh nginx1 -- "sudo /vagrant/test/streaming/setupNGINX.sh"` - -### Run the automated test - -Run the load test using the following command (here, the last parameter '15' is the number of VLC player clients to be launched): - -`vagrant --fixture=streaming ssh loadtest-streaming -- "sudo /vagrant/test/streaming/run.sh /home/ubuntu/test/streaming http://192.168.50.11/test_video/stream.mpd 15"` - -This test currently just generates the load and does not have any assertions. It breaks at 1000 clients. - -Then point your browser to the Chronograf dashboard: - -`http://localhost:8888` - -### Run the automated PyTests - -SSH into the clmc-service VM: - -`vagrant --fixture=streaming ssh clmc-service` - -Run the automated tests written in pytest: - -`pytest -s /vagrant/test/streaming/` - -### Manual test - -## Manual set-up of Chronograf's CLMC data source - -If you __do not__ want to run the automatic set-up, basic entry to the Chronograf dashboard is as follows: - -1. Point your browser to: [http://localhost:8888](http://localhost:8888) -2. Enter your connection string: `http://localhost:8086` -3. Enter the Name: `Influx 1` -4. Enter the Telegraf database: `CLMCMetrics` - -## Manual test on Windows - -### View the video -Install the VLC video client on the host machine; you must use a very recent version, otherwise the MPD file cannot be read. At the time of writing the following nightly build was installed: - -https://nightlies.videolan.org/build/win32/vlc-3.0.0-rc1-20171201-0326/vlc-3.0.0-20171201-0326-rc1-win32.exe - -Start the VLC Player: - -`Media->Open Network Stream` - -The test video is the FLAME project video and it can be viewed at the following location. - -`Enter the network URL: http://localhost:8081/test_video/stream.mpd` (for the nginx1 server) - -The video should play. - -### Query the data - -Open Chronograf by entering the following URL into a browser on the host: http://localhost:8888. Your CLMC data source, Kapacitor and demonstration dashboard should be ready for you to explore. - -Press Data Explorer in the menu, select the nginx measurement and create a query such as - -`SELECT mean("requests") AS "mean_requests" FROM "CLMCMetrics"."autogen"."nginx" WHERE time > now() - 1h GROUP BY time(10s)` - -## KPI triggers - -In this demonstrator an example KPI rule has been set up in Kapacitor which fires when the average number of active connections per 5 seconds on the Nginx 1 or Nginx 2 server goes above certain thresholds (a 'warning' at 10 connections/5 seconds).
The TICKscript specification for this rule is as follows: - -``` -dbrp "CLMCMetrics"."autogen" - -// Nginx 1 rule -// ------------- -var n1Data = batch - |query(''' SELECT mean("active") AS "mean_active" FROM "CLMCMetrics"."autogen"."nginx" WHERE "ipendpoint"='adaptive_streaming_I1_nginx1' ''') - .period(5s) - .every(5s) - -var n1Alert = n1Data - |alert() - .id('{{ .Name }}/adaptive_streaming_I1_nginx1') - .message('{{ .ID }} is {{ .Level }} Mean active connections: {{ index .Fields "mean_active" }}') - .warn(lambda: "mean_active" > 10) - .slack() - .log( '/tmp/RPSLoad.log' ) - -// Nginx 2 rule -// ------------- -var n2Data = batch - |query(''' SELECT mean("active") AS "mean_active" FROM "CLMCMetrics"."autogen"."nginx" WHERE "ipendpoint"='adaptive_streaming_I1_nginx2' ''') - .period(5s) - .every(5s) - -var n2Alert = n2Data - |alert() - .id('{{ .Name }}/adaptive_streaming_I1_nginx2') - .message('{{ .ID }} is {{ .Level }} Mean active connections: {{ index .Fields "mean_active" }}') - .warn(lambda: "mean_active" > 10) - .slack() - .log( '/tmp/RPSLoad.log' ) -``` - -Alerts are sent both to an internal log within the CLMC service file system and to a FLAME demo Slack service: - -https://flamedemo-itinnov.slack.com - -Alerts can be found under the '#clmc' channel. - -### Kapacitor rules in Chronograf's GUI - -Additional rules can be added to this demonstrator either via the Chronograf GUI (see [here](https://docs.influxdata.com/chronograf/v1.4/introduction/getting-started/#4-connect-chronograf-to-kapacitor) for more information) or by using the Kapacitor HTTP API and TICKscript (for an introduction, [look here](https://docs.influxdata.com/kapacitor/v1.4/tick/)). diff --git a/clmctest/streaming/report.sh b/clmctest/streaming/report.sh deleted file mode 100644 index 0179a1e..0000000 --- a/clmctest/streaming/report.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash -## © University of Southampton IT Innovation Centre, 2018 -## -## Copyright in this software belongs to University of Southampton -## IT Innovation Centre of Gamma House, Enterprise Road, -## Chilworth Science Park, Southampton, SO16 7NS, UK. -## -## This software may not be used, sold, licensed, transferred, copied -## or reproduced in whole or in part in any manner or form or in or -## on any media by any person other than in accordance with the terms -## of the Licence Agreement supplied with the software, or otherwise -## without the prior written consent of the copyright owners. -## -## This software is distributed WITHOUT ANY WARRANTY, without even the -## implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -## PURPOSE, except where stated in the Licence Agreement supplied with -## the software. -## -## Created By : Stephen Phillips -## Created Date : 19-02-2018 -## Created for Project : FLAME - -# This script reads stdin and expects the output of cvlc. -# It is used by the run.sh script and receives the output of the cvlc client. -# It counts the number of times the frame "dropping" error is seen and every 10 times it sends a message to telegraf reporting "another 10" errors.
- -if [ "$#" -ne 1 ]; then - echo "Error: illegal number of arguments: "$# - echo "Usage: report.sh <client number>" - exit -fi - -COUNTER=$1 -TELEGRAF=http://localhost:8186 - -ERR_COUNT=0 -while read line; do - if [[ $line = *"dropping"* ]]; then - ERR_COUNT=$(($ERR_COUNT + 1)) - fi - TEN=$((ERR_COUNT % 10)) - if [ $TEN -eq 0 ]; then - curl -i -XPOST "${TELEGRAF}/write?precision=s" --data-binary "vlc,client=${COUNTER} drop_error=10 $(date +%s)" >& /dev/null - fi -done \ No newline at end of file diff --git a/clmctest/streaming/rspec.yml b/clmctest/streaming/rspec.yml deleted file mode 100644 index 5a0c594..0000000 --- a/clmctest/streaming/rspec.yml +++ /dev/null @@ -1,88 +0,0 @@ -## (c) University of Southampton IT Innovation Centre, 2018 -## -## Copyright in this software belongs to University of Southampton -## IT Innovation Centre of Gamma House, Enterprise Road, -## Chilworth Science Park, Southampton, SO16 7NS, UK. -## -## This software may not be used, sold, licensed, transferred, copied -## or reproduced in whole or in part in any manner or form or in or -## on any media by any person other than in accordance with the terms -## of the Licence Agreement supplied with the software, or otherwise -## without the prior written consent of the copyright owners. -## -## This software is distributed WITHOUT ANY WARRANTY, without even the -## implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -## PURPOSE, except where stated in the Licence Agreement supplied with -## the software. -## -## Created By : Michael Boniface -## Created Date : 02-02-2018 -## Created for Project : FLAME - -hosts: - - name: clmc-service - cpus: 1 - memory: 2048 - disk: "10GB" - forward_ports: - - guest: 8086 - host: 8086 - - guest: 8888 - host: 8888 - - guest: 9092 - host: 9092 - ip_address: "192.168.50.10" - - name: nginx1 - cpus: 1 - memory: 2048 - disk: "10GB" - service_name: "nginx" - forward_ports: - - guest: 80 - host: 8081 - ip_address: "192.168.50.11" - location: "DC1" - sfc_id: "MS_Template_1" - sfc_id_instance: "MS_I1" - sf_id: "adaptive_streaming" - sf_id_instance: "adaptive_streaming_I1" - ipendpoint_id: "adaptive_streaming_I1_nginx1" - sr_id: "service_router" - influxdb_url: "http://192.168.50.10:8086" - database_name: "CLMCMetrics" - - name: nginx2 - cpus: 1 - memory: 2048 - disk: "10GB" - service_name: "nginx" - forward_ports: - - guest: 80 - host: 8082 - ip_address: "192.168.50.12" - location: "DC2" - sfc_id: "MS_Template_1" - sfc_id_instance: "MS_I1" - sf_id: "adaptive_streaming" - sf_id_instance: "adaptive_streaming_I1" - ipendpoint_id: "adaptive_streaming_I1_nginx2" - sr_id: "service_router" - influxdb_url: "http://192.168.50.10:8086" - database_name: "CLMCMetrics" - - name: loadtest-streaming - cpus: 2 - memory: 4096 - disk: "10GB" - service_name: "loadtest-streaming" - forward_ports: - - guest: 80 - host: 8083 - ip_address: "192.168.50.13" - location: "DC1" - sfc_id: "MS_Template_1" - sfc_id_instance: "MS_I1" - sf_id: "adaptive_streaming_client" - sf_id_instance: "adaptive_streaming_I1" - ipendpoint_id: "adaptive_streaming_I1_client1" - sr_id: "service_router" - influxdb_url: "http://192.168.50.10:8086" - database_name: "CLMCMetrics" diff --git a/clmctest/streaming/rules.json b/clmctest/streaming/rules.json deleted file mode 100644 index faad48b..0000000 --- a/clmctest/streaming/rules.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id" : "Request_Rate_Alert_NGINXServers", - "type" : "batch", - "dbrps" : [{"db": "CLMCMetrics", "rp" : "autogen"}], - - "script" : "\/\/ NGINX 1 Rule\r\n\/\/ 
-------------\r\nvar n1Data = batch\r\n |query(''' SELECT mean(\"active\") AS \"mean_active\" FROM \"CLMCMetrics\".\"autogen\".\"nginx\" WHERE \"ipendpoint\"='adaptive_streaming_I1_nginx1' ''')\r\n .period(5s)\r\n .every(5s)\r\n\r\nvar n1Alert = n1Data\r\n |alert()\r\n .id('{{ .Name }}\/adaptive_streaming_I1_nginx1')\r\n .message('{{ .ID }} is {{ .Level }} Mean active connections: {{ index .Fields \"mean_active\" }}')\r\n .warn(lambda: \"mean_active\" > 10)\r\n .slack()\r\n .log( '\/tmp\/RPSLoad.log' )\r\n\r\n\/\/ NGINX 2 Rule\r\n\/\/ -------------\r\nvar n2Data = batch\r\n |query(''' SELECT mean(\"active\") AS \"mean_active\" FROM \"CLMCMetrics\".\"autogen\".\"nginx\" WHERE \"ipendpoint\"='adaptive_streaming_I1_nginx2' ''')\r\n .period(5s)\r\n .every(5s)\r\n\r\nvar n2Alert = n2Data\r\n |alert()\r\n .id('{{ .Name }}\/adaptive_streaming_I1_nginx2')\r\n .message('{{ .ID }} is {{ .Level }} Mean active connections: {{ index .Fields \"mean_active\" }}')\r\n .warn(lambda: \"mean_active\" > 10)\r\n .slack()\r\n .log( '\/tmp\/RPSLoad.log' )", - - "status" : "enabled" -} \ No newline at end of file diff --git a/clmctest/streaming/run.sh b/clmctest/streaming/run.sh deleted file mode 100644 index 81c7d5f..0000000 --- a/clmctest/streaming/run.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/bash -#///////////////////////////////////////////////////////////////////////// -#// -#// (c) University of Southampton IT Innovation Centre, 2017 -#// -#// Copyright in this software belongs to University of Southampton -#// IT Innovation Centre of Gamma House, Enterprise Road, -#// Chilworth Science Park, Southampton, SO16 7NS, UK. -#// -#// This software may not be used, sold, licensed, transferred, copied -#// or reproduced in whole or in part in any manner or form or in or -#// on any media by any person other than in accordance with the terms -#// of the Licence Agreement supplied with the software, or otherwise -#// without the prior written consent of the copyright owners. -#// -#// This software is distributed WITHOUT ANY WARRANTY, without even the -#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -#// PURPOSE, except where stated in the Licence Agreement supplied with -#// the software. 
-#// -#// Created By : Michael Boniface -#// Created Date : 15/02/2017 -#// Created for Project : FLAME -#// -#///////////////////////////////////////////////////////////////////////// - -if [ "$#" -ne 3 ]; then - echo "Error: illegal number of arguments: "$# - echo "Usage: run.sh TEST_RUN_DIR STREAM_URI MAX_CLIENTS" - exit -fi - -# create test directories -TEST_FOLDER=$(date +%Y%m%d%H%M%S) -TEST_RUN_DIR=$1 -TEST_DIR=$TEST_RUN_DIR"/streaming/"$TEST_FOLDER -echo "Test directory: "$TEST_DIR -mkdir -p "$TEST_DIR" - -# run testplan -cd $TEST_DIR - -#jmeter -n -LDEBUG -t /vagrant/test/streaming/testplan.jmx -l results.jtx -j jmeter.log - -# quick bash equivalent in case Jmeter fails -STREAM_URI=$2 -COUNTER=0 -MAX_CLIENTS=$3 -while [ $COUNTER -lt $MAX_CLIENTS ]; do - # run cvlc headless, redirect stderr into stdout, pipe that into the report.sh script - cvlc -Vdummy --no-audio $STREAM_URI 2>&1 | /vagrant/test/streaming/report.sh ${COUNTER} & - sleep 1 - let COUNTER=COUNTER+1 -done - - - diff --git a/clmctest/streaming/setupCLMC.sh b/clmctest/streaming/setupCLMC.sh deleted file mode 100644 index e7e2fc9..0000000 --- a/clmctest/streaming/setupCLMC.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -#///////////////////////////////////////////////////////////////////////// -#// -#// (c) University of Southampton IT Innovation Centre, 2018 -#// -#// Copyright in this software belongs to University of Southampton -#// IT Innovation Centre of Gamma House, Enterprise Road, -#// Chilworth Science Park, Southampton, SO16 7NS, UK. -#// -#// This software may not be used, sold, licensed, transferred, copied -#// or reproduced in whole or in part in any manner or form or in or -#// on any media by any person other than in accordance with the terms -#// of the Licence Agreement supplied with the software, or otherwise -#// without the prior written consent of the copyright owners. -#// -#// This software is distributed WITHOUT ANY WARRANTY, without even the -#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -#// PURPOSE, except where stated in the Licence Agreement supplied with -#// the software. -#// -#// Created By : Simon Crowle -#// Created Date : 14/02/2018 -#// Created for Project : FLAME -#// -#///////////////////////////////////////////////////////////////////////// - -TEST_DIR=$1 - -# copy Kapacitor conf to /etc/kapacitor and restart - -systemctl stop kapacitor -echo $TEST_DIR"/kapacitor.conf" -cp $TEST_DIR/kapacitor.conf /etc/kapacitor/kapacitor.conf -systemctl start kapacitor - -# Set up Influx data source -curl -i -X POST -H "Content-Type: application/json" http://localhost:8888/chronograf/v1/sources -d @$TEST_DIR/influx.json - -# Set up dashboard -curl -i -X POST -H "Content-Type: application/json" http://localhost:8888/chronograf/v1/dashboards -d @$TEST_DIR/dashboard.json diff --git a/clmctest/streaming/setupNGINX.sh b/clmctest/streaming/setupNGINX.sh deleted file mode 100644 index 3833350..0000000 --- a/clmctest/streaming/setupNGINX.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -#///////////////////////////////////////////////////////////////////////// -#// -#// (c) University of Southampton IT Innovation Centre, 2018 -#// -#// Copyright in this software belongs to University of Southampton -#// IT Innovation Centre of Gamma House, Enterprise Road, -#// Chilworth Science Park, Southampton, SO16 7NS, UK. 
-#// -#// This software may not be used, sold, licensed, transferred, copied -#// or reproduced in whole or in part in any manner or form or in or -#// on any media by any person other than in accordance with the terms -#// of the Licence Agreement supplied with the software, or otherwise -#// without the prior written consent of the copyright owners. -#// -#// This software is distributed WITHOUT ANY WARRANTY, without even the -#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -#// PURPOSE, except where stated in the Licence Agreement supplied with -#// the software. -#// -#// Created By : Simon Crowle -#// Created Date : 14/02/2018 -#// Created for Project : FLAME -#// -#///////////////////////////////////////////////////////////////////////// - -# NGINX -DEST_DIR="/usr/share/nginx/html" - -TEST_VIDEO="20180212104221flame-project-full.mp4" -TEST_VIDEO_ARCHIVE=$TEST_VIDEO".gz" -DEST_FILE=$DEST_DIR"/"$TEST_VIDEO_ARCHIVE - -echo "ftp://ftp.it-innovation.soton.ac.uk/testdata/video/"$TEST_VIDEO_ARCHIVE - -# Copy files for MPEG-DASH testing -curl "ftp://ftp.it-innovation.soton.ac.uk/testdata/video/"$TEST_VIDEO_ARCHIVE --user flame-rw:DR8ngj3ogSjd8gl -o $DEST_FILE -tar -xvf $DEST_FILE -C $DEST_DIR - -rm -rf $DEST_FILE -mv $DEST_DIR"/"$TEST_VIDEO $DEST_DIR"/"test_video diff --git a/clmctest/streaming/stop.sh b/clmctest/streaming/stop.sh deleted file mode 100644 index 55c8a28..0000000 --- a/clmctest/streaming/stop.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -""" -// © University of Southampton IT Innovation Centre, 2018 -// -// Copyright in this software belongs to University of Southampton -// IT Innovation Centre of Gamma House, Enterprise Road, -// Chilworth Science Park, Southampton, SO16 7NS, UK. -// -// This software may not be used, sold, licensed, transferred, copied -// or reproduced in whole or in part in any manner or form or in or -// on any media by any person other than in accordance with the terms -// of the Licence Agreement supplied with the software, or otherwise -// without the prior written consent of the copyright owners. -// -// This software is distributed WITHOUT ANY WARRANTY, without even the -// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -// PURPOSE, except where stated in the Licence Agreement supplied with -// the software. -// -// Created By : Michael Boniface -// Created Date : 15-02-2018 -// Created for Project : FLAME -""" - -for pid in $(ps -ef | grep "/usr/bin/vlc" | awk '{print $2}'); do kill -9 $pid; done -# TODO: 'killall vlc' should work: need to test though \ No newline at end of file diff --git a/clmctest/streaming/test_rspec.py b/clmctest/streaming/test_rspec.py deleted file mode 100644 index b90f501..0000000 --- a/clmctest/streaming/test_rspec.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/python3 -""" -// © University of Southampton IT Innovation Centre, 2018 -// -// Copyright in this software belongs to University of Southampton -// IT Innovation Centre of Gamma House, Enterprise Road, -// Chilworth Science Park, Southampton, SO16 7NS, UK. -// -// This software may not be used, sold, licensed, transferred, copied -// or reproduced in whole or in part in any manner or form or in or -// on any media by any person other than in accordance with the terms -// of the Licence Agreement supplied with the software, or otherwise -// without the prior written consent of the copyright owners. 
-// -// This software is distributed WITHOUT ANY WARRANTY, without even the -// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -// PURPOSE, except where stated in the Licence Agreement supplied with -// the software. -// -// Created By : Michael Boniface -// Created Date : 24-02-2018 -// Created for Project : FLAME -""" - -from subprocess import run -from platform import system -import pytest - - -@pytest.mark.parametrize("service_name", [ - 'clmc-service', - 'nginx1', - 'nginx2', - 'loadtest-streaming' -]) -def test_service_names(streaming_config, service_name): - """ - Tests the service names in the configuration. - - :param streaming_config: the configuration fixture collected from conftest.py - :param service_name: the service name to test - """ - - assert any(s['name'] == service_name for s in streaming_config['hosts']), "{0} not in list of hosts".format(service_name) - print("\nSuccessfully passed configuration test for service name {0}\n".format(service_name)) - - -def test_ping(streaming_config): - """ - Pings each service to test for liveness - - :param streaming_config: the configuration fixture collected from conftest.py - """ - - print("\n") # blank line printed for formatting purposes - - ping_count = 1 - system_dependent_param = "-n" if system().lower() == "windows" else "-c" - - for service in streaming_config['hosts']: - command = ["ping", system_dependent_param, str(ping_count), service['ip_address']] - assert run(command).returncode == 0, "Service ping test failed for {0} with ip address {1}".format(service['name'], service['ip_address']) - print("\nSuccessfully passed ping test for service: {0}\n".format(service['name'])) diff --git a/clmctest/streaming/test_rule1.json b/clmctest/streaming/test_rule1.json deleted file mode 100644 index 17d4cde..0000000 --- a/clmctest/streaming/test_rule1.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id" : "TestRule1", - "type" : "batch", - "dbrps" : [{"db": "CLMCMetrics", "rp" : "autogen"}], - - "script" : "var ruleData = batch\r\n |query(''' SELECT mean(\"handled\") AS \"mean_handled\" FROM \"CLMCMetrics\".\"autogen\".\"nginx\" WHERE \"ipendpoint\"='adaptive_streaming_I1_nginx1' ''')\r\n .period(5s)\r\n .every(5s)\r\n\r\nvar ruleAlert = ruleData\r\n |alert()\r\n .id('{{ .Name }}\/adaptive_streaming_I1_nginx1')\r\n .message('{{ .ID }} is {{ .Level }} Mean handled connections: {{ index .Fields \"mean_handled\" }}')\r\n .warn(lambda: \"mean_handled\" > 10)\r\n .log( '\/tmp\/TestRule1.log' )", - - "status" : "enabled" -} \ No newline at end of file diff --git a/clmctest/streaming/test_rule2.json b/clmctest/streaming/test_rule2.json deleted file mode 100644 index c9adb84..0000000 --- a/clmctest/streaming/test_rule2.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "id" : "TestRule2", - "type" : "batch", - "dbrps" : [{"db": "CLMCMetrics", "rp" : "autogen"}], - - "script" : "var ruleData = batch\r\n |query(''' SELECT mean(\"waiting\") AS \"mean_waiting\" FROM \"CLMCMetrics\".\"autogen\".\"nginx\" WHERE \"ipendpoint\"='adaptive_streaming_I1_nginx1' ''')\r\n .period(5s)\r\n .every(5s)\r\n\r\nvar ruleAlert = ruleData\r\n |alert()\r\n .id('{{ .Name }}\/adaptive_streaming_I1_nginx1')\r\n .message('{{ .ID }} is {{ .Level }} Mean waiting connections: {{ index .Fields \"mean_waiting\" }}')\r\n .warn(lambda: \"mean_waiting\" > 10)\r\n .log( '\/tmp\/TestRule2.log' )", - - "status" : "enabled" -} \ No newline at end of file diff --git a/clmctest/streaming/test_streaming.py b/clmctest/streaming/test_streaming.py deleted file mode 100644 index 
9097b81..0000000 --- a/clmctest/streaming/test_streaming.py +++ /dev/null @@ -1,222 +0,0 @@ -#!/usr/bin/python3 -""" -// © University of Southampton IT Innovation Centre, 2018 -// -// Copyright in this software belongs to University of Southampton -// IT Innovation Centre of Gamma House, Enterprise Road, -// Chilworth Science Park, Southampton, SO16 7NS, UK. -// -// This software may not be used, sold, licensed, transferred, copied -// or reproduced in whole or in part in any manner or form or in or -// on any media by any person other than in accordance with the terms -// of the Licence Agreement supplied with the software, or otherwise -// without the prior written consent of the copyright owners. -// -// This software is distributed WITHOUT ANY WARRANTY, without even the -// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR -// PURPOSE, except where stated in the Licence Agreement supplied with -// the software. -// -// Created By : Michael Boniface -// Created Date : 19-03-2018 -// Created for Project : FLAME -""" - -from threading import Thread -from time import sleep -from queue import Queue -from xml.etree import ElementTree -from urllib.parse import urljoin -from os.path import isfile, dirname, join -from os import remove, system -import pytest -import requests -import json - - -class TestStreamingAlerts(object): - """ - A testing class used to group all the tests related to the streaming scenario. - """ - - kapacitor_url = "http://localhost:9092/kapacitor/v1/tasks" - - @pytest.mark.parametrize("rule, log", [ - ("rules.json", "/tmp/RPSLoad.log"), - ("test_rule1.json", "/tmp/TestRule1.log"), - ("test_rule2.json", "/tmp/TestRule2.log"), - ]) - def test_alerts(self, rule, log, streaming_url, streaming_manifest): - """ - This test case generates some streaming requests to the server to ensure an alert is triggered and then tests the log file for this alert. Different logs can be tested by - appending to the list of parameters in the pytest decorator. 
- - Format for pytest parameters under test: - ([filename], [log]) - where [filename] is the name of the json file for the rule under test (must be in the same folder as this test) - [log] is the absolute path of the log file that must be created due to an alert - - :param rule: the name of the rule json file - :param log: the path of the log file that is under test - :param streaming_url: the fixture providing the streaming url for this test case - :param streaming_manifest: the fixture providing the root of the XML streaming manifest - """ - - kapacitor_setter = self.kapacitor_setting(rule, log) - next(kapacitor_setter) # Setup the test rule - - print("Testing alert creation for rule: {0}".format(rule)) - - segments = streaming_manifest.findall(".//{urn:mpeg:DASH:schema:MPD:2011}SegmentURL") - - threads_num = 30 - threads_queue = Queue(maxsize=threads_num) # a synchronized queue is used to track whether all the threads have finished execution - threads = [StreamingThread(streaming_url, segments, threads_queue) for _ in range(threads_num)] - for t in threads: - t.start() - - alert_created = False - counter = 0 - time_delay = 2.5 - while True: - # loop while threads are executing, checking every 2.5 seconds whether the alert log has been created or the threads have finished execution - sleep(time_delay) - if isfile(log): - for t in threads: # kill all running threads in case the log file is created beforehand - t.stop() - alert_created = True - - if threads_queue.full(): - break - - counter += time_delay # the counter tracks the time taken; for the rules under test a 30-second time frame is usually enough to trigger the alert - if counter >= 12*time_delay: - for t in threads: # kill all running threads in case of test failure - t.stop() - break - - assert alert_created, "Alerts test failed: no log file was created indicating a triggered alert for rule {0}.".format(rule) - - print("Successfully passed alert creation test for rule: {0}.".format(rule)) - - next(kapacitor_setter) # Teardown the test rule - - def kapacitor_setting(self, rule, log): - """ - A generator function used to provide setUp/tearDown actions for a particular kapacitor rule. - On setUp the rule is initialized; on tearDown the rule is deleted. Interleaving is achieved using the generator pattern.
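For reference, the setUp/tearDown cycle that kapacitor_setting drives against the Kapacitor REST API can be summarised as a standalone sketch. The endpoint mirrors the kapacitor_url class attribute above; the helper names here are illustrative and not part of the test suite:

```python
# Illustrative sketch of the Kapacitor task lifecycle used by kapacitor_setting
# (helper names are hypothetical; the endpoint is the kapacitor_url above).
import json
from urllib.parse import urljoin

import requests

KAPACITOR_TASKS_URL = "http://localhost:9092/kapacitor/v1/tasks"


def create_task(rule_file_path):
    """POST a task definition (id, type, dbrps, script, status) to Kapacitor."""
    with open(rule_file_path) as f:
        data = "".join(line.strip() for line in f)
    task_id = json.loads(data)["id"]
    # remove any stale task with the same ID left over from a previous run
    requests.delete(url=urljoin(KAPACITOR_TASKS_URL + "/", task_id))
    r = requests.post(url=KAPACITOR_TASKS_URL, data=data, headers={"Content-Type": "application/json"})
    assert r.status_code == 200, "Couldn't create alert rule"
    return task_id


def delete_task(task_id):
    """DELETE the task once the test is done with it."""
    requests.delete(url=urljoin(KAPACITOR_TASKS_URL + "/", task_id))
```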
- - :param rule: the name of the json file for the rule under test - :param log: the absolute path of the log file that's being tested - """ - - # check if the log file is already created due to a previous test - try: - if isfile(log): - remove(log) # delete the log file if left over from previous tests - except PermissionError: - system("sudo rm {0}".format(log)) # handles the case of running on linux where permission will be required to delete the old log file - - # Initialization of the kapacitor rule - Test setUp (UnitTest style) - with open(join(dirname(__file__), rule), "r") as rule_file: - data = "".join(line.strip() for line in rule_file.readlines()) - - rule_data = json.loads(data) - requests.delete(url=urljoin(self.kapacitor_url + "/", rule_data.get("id"))) # delete any task with the same ID already set in kapacitor - r = requests.post(url=self.kapacitor_url, data=data, headers={"Content-Type": "application/json"}) - assert r.status_code == 200, "Couldn't create alert rule {0}".format(rule) - print("\nSuccessfully created test rule {0}".format(rule)) - - yield - - # Deleting the kapacitor rule used for testing - Test tearDown (UnitTest style) - requests.delete(url=urljoin(self.kapacitor_url + "/", rule_data.get("id"))) - - # check if the log file is created and clean it up - try: - if isfile(log): - remove(log) # delete the log file if left over from previous tests - except PermissionError: - system("sudo rm {0}".format(log)) # handles the case of running on linux where permission will be required to delete the old log file - yield - - @staticmethod - @pytest.fixture(scope="class", params=[{"server": "http://192.168.50.11", "video": "/test_video/stream.mpd"}]) - def streaming_url(request): - """ - A fixture with class scope - used only in the scope of the testing class. - - :param request: the parameters for this fixture - server url and video relative url - :return: the combined URL for the video used for streaming - """ - - return urljoin(request.param["server"], request.param["video"]) - - @staticmethod - @pytest.fixture(scope="class") - def streaming_manifest(streaming_url): - """ - A fixture to download the manifest file for the streamed video and parse the downloaded XML content - - :param streaming_url: the fixture which provides the streaming url - :return: an XML root node object - """ - - manifest_xml = requests.get(streaming_url).text - root = ElementTree.fromstring(manifest_xml) - return root - - -class StreamingThread(Thread): - - def __init__(self, url, segments, queue): - """ - Subclassing the Thread class to create a custom streaming thread. - - :param url: the streaming url - :param segments: the list of SegmentURL XML nodes - :param queue: an auxiliary parameter used to indicate when this thread has finished execution - """ - - super(StreamingThread, self).__init__() - self.running = False - self.url = url - self.segments = segments - self.queue = queue - self._test_finished = False # a flag to indicate whether the thread should stop running - - def stop(self): - """ - Kill this thread and suspend its execution. - """ - - self._test_finished = True - - def run(self): - """ - Simulates an actual stream by downloading different audio/video segments from the server using a requests session, - which keeps the connection open while the thread executes. 
- """ - - size = len(self.segments) - size = size if size % 2 == 0 else size - 1 - - s = requests.session() - - for i in range(0, int(size / 2), 1): - segment_audio = self.segments[0] - segment_video = self.segments[int(size / 2) + i] - segment_audio_url = segment_audio.attrib.get('media') - segment_video_url = segment_video.attrib.get('media') - - s.get(urljoin(self.url, segment_audio_url)) - s.get(urljoin(self.url, segment_video_url)) - - # check if thread is killed in case the test has already succeeded - if self._test_finished: - break - - # a small time out to mimic the behaviour of a real streaming - sleep(2.5) - - self.queue.put(True) diff --git a/docs/Measuring-E2E-MS-Performance.md b/docs/Measuring-E2E-MS-Performance.md index 95a3e07..278444b 100644 --- a/docs/Measuring-E2E-MS-Performance.md +++ b/docs/Measuring-E2E-MS-Performance.md @@ -247,12 +247,12 @@ The aggregation process provides similar functionality to that of an INFLUX cont * Network delays query - to obtain the network delay values and group them by their **path_ID**, **source_SFR** and **target_SFR** identifiers: ``` -SELECT mean(delay) as "net_delay" FROM "E2EMetrics"."autogen"."network_delays" WHERE time >= now() - 10s and time < now() GROUP BY path_ID, source_SFR, target_SFR +SELECT mean(delay) as "net_delay" FROM "CLMCMetrics"."autogen"."network_delays" WHERE time >= now() - 10s and time < now() GROUP BY path_ID, source_SFR, target_SFR ``` * Media service response time query - to obtain the response time values of the media service instances and group them by **endpoint**, **sf_instance** and **sfr** identifiers: ``` -SELECT mean(response_time) as "response_time" FROM "E2EMetrics"."autogen"."service_delays" WHERE time >= now() - 10s and time < now() GROUP BY endpoint, sf_instance, sfr +SELECT mean(response_time) as "response_time" FROM "CLMCMetrics"."autogen"."service_delays" WHERE time >= now() - 10s and time < now() GROUP BY endpoint, sf_instance, sfr ``` The results of the queries are then matched against each other on the **target** and **sfr** tag values (for *network_delays* and *service_delays* respectively): diff --git a/docs/aggregation.md b/docs/aggregation.md index cc0e626..bedb0e2 100644 --- a/docs/aggregation.md +++ b/docs/aggregation.md @@ -83,12 +83,12 @@ the script executes at every 10-second-period querying the averaged data for the * Network delays query - to obtain the network delay values and group them by their **path** identifier: ``` -SELECT mean(delay) as "Dnet" FROM "E2EMetrics"."autogen".network_delays WHERE time >= now() - 10s and time < now() GROUP BY path +SELECT mean(delay) as "Dnet" FROM "CLMCMetrics"."autogen".network_delays WHERE time >= now() - 10s and time < now() GROUP BY path ``` * Media service response time query - to obtain the response time values of the media service instances and group them by **FQDN**, **sf_instance** and **endpoint** identifiers: ``` -SELECT mean(response_time) as "Dresponse" FROM "E2EMetrics"."autogen".service_delays WHERE time >= now() - 10s and time < now() GROUP BY FQDN, sf_instance, endpoint +SELECT mean(response_time) as "Dresponse" FROM "CLMCMetrics"."autogen".service_delays WHERE time >= now() - 10s and time < now() GROUP BY FQDN, sf_instance, endpoint ``` The results of the queries are then matched against each other on endpoint ID: on every match of the **endpoint** tag of the **service_delays** measurement with diff --git a/docs/clmc-service.md b/docs/clmc-service.md index a179f0e..c8ff020 100644 --- a/docs/clmc-service.md +++ 
b/docs/clmc-service.md @@ -51,7 +51,7 @@ All source code, tests and configuration files of the service can be found in th ```json { "aggregator_report_period": 5, - "aggregator_database_name": "E2EMetrics", + "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://172.40.231.51:8086" } ``` @@ -70,7 +70,7 @@ All source code, tests and configuration files of the service can be found in th ```json { "aggregator_report_period": 25, - "aggregator_database_name": "E2EMetrics", + "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://172.50.231.61:8086" } ``` @@ -85,7 +85,7 @@ All source code, tests and configuration files of the service can be found in th ```json { "aggregator_report_period": 25, - "aggregator_database_name": "E2EMetrics", + "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://172.50.231.61:8086" } ``` @@ -101,7 +101,7 @@ All source code, tests and configuration files of the service can be found in th ```json { "aggregator_report_period": 125, - "aggregator_database_name": "E2EMetrics", + "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://172.50.231.61:8086/", "malformed": true, "comment": "Aggregator is running in a malformed state - it uses an old version of the configuration. Please, restart it so that the updated configuration is used." diff --git a/scripts/clmc-service/install.sh b/scripts/clmc-service/install.sh index acd817f..cb581bc 100755 --- a/scripts/clmc-service/install.sh +++ b/scripts/clmc-service/install.sh @@ -135,7 +135,7 @@ fi # navigate to the clmc-webservice - and check echo "----> Moving to CLMC webservice" -cd /vagrant/src/clmcwebservice +cd ${REPO_ROOT}/src/service if [ $? -ne 0 ] ; then echo "Failed: could not find clmc-webservice" exit 1 @@ -159,15 +159,18 @@ else echo "CLMC service started." fi +# wait for the clmc service to start while ! nc -z localhost 9080 do echo "Waiting for clmc service port 9080 to be ready on localhost..." 
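The two curl PUT requests issued after this wait loop first configure and then start the aggregator. A minimal Python sketch of the same two calls, assuming the service port 9080 used above; the literal values stand in for the script's ${REPORT_PERIOD}, ${DATABASE_NAME} and ${INFLUX_URL} shell variables:

```python
# Minimal sketch of the two configuration calls install.sh makes with curl,
# expressed with requests; the values are placeholders for shell variables.
import requests

BASE_URL = "http://localhost:9080"

# equivalent of the curl PUT to /aggregator/config
config = {
    "aggregator_report_period": 5,                       # ${REPORT_PERIOD}
    "aggregator_database_name": "CLMCMetrics",           # ${DATABASE_NAME}
    "aggregator_database_url": "http://localhost:8086",  # ${INFLUX_URL}
}
requests.put(BASE_URL + "/aggregator/config", json=config)

# equivalent of the curl PUT to /aggregator/control - starts the aggregator
requests.put(BASE_URL + "/aggregator/control", json={"action": "start"})
```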
sleep 5 done + # configure the CLMC service JSON="{\"aggregator_report_period\": ${REPORT_PERIOD}, \"aggregator_database_name\": \"${DATABASE_NAME}\", \"aggregator_database_url\": \"${INFLUX_URL}\"}" curl -H 'Content-Type: application/json' -X PUT -d "${JSON}" http://localhost:9080/aggregator/config + # start the aggregator JSON="{\"action\": \"start\"}" curl -H 'Content-Type: application/json' -X PUT -d "${JSON}" http://localhost:9080/aggregator/control diff --git a/src/clmcagent/__init__.py b/src/clmcagent/__init__.py deleted file mode 100644 index 44f7725..0000000 --- a/src/clmcagent/__init__.py +++ /dev/null @@ -1 +0,0 @@ -#!/usr/bin/python3 \ No newline at end of file diff --git a/src/clmcagent/config_collector.py b/src/clmcagent/config_collector.py deleted file mode 100644 index 6ebed75..0000000 --- a/src/clmcagent/config_collector.py +++ /dev/null @@ -1,225 +0,0 @@ -#!/usr/bin/python3 -import threading -import time -import random -import logging -from influxdb import InfluxDBClient - -logging.basicConfig(level=logging.DEBUG) -logger = logging.getLogger() - -class ConfigCollector(threading.Thread): - STATE_NAME = 0 - STATE_TIME = 1 - - def __init__(self, sample_func, write_func, resource_name, sample_rate=2, agg_period=10): - threading.Thread.__init__(self) - self._start_event = threading.Event() - self.sample_func = sample_func - self.write_func = write_func - self.resource_name = resource_name - self.sample_rate = sample_rate - self.agg_period = agg_period - self.agg_states = {} - self.current_measurement = {} - return - - def run(self): - # if thread running then return - if(self._start_event.is_set()): - return - self._start_event.set() - - # set start period to current time - start_period = time.time() - logger.debug("start time = {0}".format(start_period)) - # set end period to the aggregation period - end_period = start_period + self.agg_period - logger.debug("end time = {0}".format(end_period)) - # initialise the time in the current state - current_state_time = 0 - samples = [] - while(self._start_event.is_set()): - # get sample using sampler function - (sample_state, sample_time) = self.sample_func() - # add sample to list of samples - samples.append((sample_state, sample_time)) - logger.debug("Sample state {0}".format(sample_state)) - logger.debug("Sample count: {0}".format(len(samples))) - # if last sample was at the end of the aggregation period then process - if sample_time >= end_period: - # aggregate samples into single measurement - self.current_measurement = self.create_measurement(samples, current_state_time, sample_time) - # write output - write_thread = WriteThread(self.write_func, self.current_measurement) - write_thread.start() - # set time in current state - current_state_time = self.current_measurement[0]['fields']['current_state_time'] - # remove all processed samples - samples.clear() - # add last sample as 1st sample of the next period - samples.append((sample_state, sample_time)) - # set new end period - end_period = sample_time + self.agg_period - logger.debug("Number of samples after agg: {0}".format(len(samples))) - logger.debug("Next end time {0}".format(end_period)) - - # calc how long it took to process samples - processing_time = time.time() - sample_time - logger.debug("Processing time {0}".format(processing_time)) - # calc the remaining time to wait until next sample - sleep_time = self.sample_rate - processing_time - logger.debug("Sleep time {0}".format(sleep_time)) - # if processing took longer than the sample rate we have a problem - # and we 
will need to put processing into a worker thread - if(sleep_time < 0): - logger.warning("Aggregation processing took longer than the sample rate") - sleep_time = 0 - logger.debug("Sleeping for sample {0}".format(sleep_time)) - # wait for the next sample - time.sleep(sleep_time) - logger.debug("Finished collection thread") - return - - def stop(self): - logger.debug("Stopping thread") - self._start_event.clear() - - def create_measurement(self, samples, initial_state_time, current_time): - logger.debug("Samples: {0}".format(str(samples))) - - # aggregate samples into states - states = self.aggregate_samples(samples) - logger.debug("States: {0}".format(str(states))) - - # aggregate the states into a measurement - fields = self.aggregate_states(states, initial_state_time) - measurement_time = int(current_time*1000000000) - measurement = [{"measurement": "service_config_state", - "tags": { - "resource_name": self.resource_name - }, - "time": measurement_time - }] - measurement[0]['fields'] = fields['fields'] - logger.debug("Report: {0}".format(str(measurement))) - - return measurement - - def aggregate_samples(self, samples): - states = [] - - sample_count = len(samples) - logger.debug("Sample count {0}".format(sample_count)) - # error if no samples to aggregate - if sample_count == 0: - raise ValueError('No samples in the samples list') - - # no aggregation needed if only one sample - if sample_count == 1: - return samples[0] - - # aggregate samples - last_index = sample_count-1 - for index, sample in enumerate(samples): - # for the 1st sample we set the current state and state_start_time - if index == 0: - current_state = sample[self.STATE_NAME] - state_start_time = sample[self.STATE_TIME] - logger.debug("Start time : {0}".format(state_start_time)) - else: - # add state duration for previous state after transition - if current_state != sample[self.STATE_NAME]: - # calc time in current state - state_time = sample[self.STATE_TIME] - state_start_time - states.append([current_state,state_time]) - # set the state to the next state - current_state = sample[self.STATE_NAME] - # set the start time of the next state - state_start_time = state_start_time + state_time - # deal with the final sample - if index == last_index: - # calc state duration if last sample is the same as previous state - if current_state == sample[self.STATE_NAME]: - state_time = sample[self.STATE_TIME] - state_start_time - states.append([current_state,state_time]) - # add transition in final sample with zero duration - elif current_state != sample[self.STATE_NAME]: - states.append([current_state,0]) - return states - - def aggregate_states(self, states, initial_state_time): - # set initial state to the 1st sample - initial_state = states[0][self.STATE_NAME] - logger.debug("Initial state : {0}".format(initial_state)) - logger.debug("Initial state time : {0}".format(initial_state_time)) - # set the current state as the last state sampled - current_state = states[-1][self.STATE_NAME] - # if no change in state take the initial state time and add current state time - if initial_state == current_state and len(states) == 1: - current_state_time = initial_state_time + states[-1][self.STATE_TIME] - state_sum_key = current_state + "_sum" - state_count_key = current_state + "_count" - # initialise the number of transitions if it's the 1st time - if state_sum_key not in self.agg_states: - self.agg_states[state_count_key] = 1 - self.agg_states[state_sum_key] = current_state_time - else: - # current state time is the last state time - 
current_state_time = states[-1][self.STATE_TIME] - # calc the total duration and number of transitions in each state. - for state in states: - # if first occurrence of state add with initial duration and a single transition - state_sum_key = state[self.STATE_NAME] + "_sum" - state_count_key = state[self.STATE_NAME] + "_count" - if state_sum_key not in self.agg_states: - logger.debug("Adding state: {0}".format(state[self.STATE_NAME])) - self.agg_states[state_sum_key] = state[self.STATE_TIME] - self.agg_states[state_count_key] = 1 - else: - logger.debug("Aggregating state: {0}".format(state[self.STATE_NAME])) - # increment number of times in the state - self.agg_states[state_count_key] += 1 - logger.debug("increment number of times in the state") - # add state time to aggregate total - self.agg_states[state_sum_key] += state[self.STATE_TIME] - logger.debug("Duration: {0}".format(self.agg_states[state_sum_key])) - - # Create report - measurement = {} - measurement['fields'] = self.agg_states - measurement['fields']['current_state'] = current_state - measurement['fields']['current_state_time'] = current_state_time - return measurement - -class WriteThread(threading.Thread): - def __init__(self, write_func, measurement): - threading.Thread.__init__(self) - self._start_event = threading.Event() - self.write_func = write_func - self.measurement = measurement - return - - def run(self): - # if thread running then return - if(self._start_event.is_set()): - return - self._start_event.set() - self.write_func(self.measurement) - - def stop(self): - self._start_event.clear() - -class InfluxWriter(): - def __init__(self, hostname, port, database): - self.db_client = InfluxDBClient(host=hostname, port=port, database=database, timeout=10) - return - - def write(self, measurement): - # attempt the write, printing any error raised by the client - try: - points = [] - points.append(measurement) - self.db_client.write_points(points) - except Exception as e: - print(e) diff --git a/src/clmcagent/stop_systemctl_monitor.sh b/src/clmcagent/stop_systemctl_monitor.sh deleted file mode 100644 index 5cd1d4d..0000000 --- a/src/clmcagent/stop_systemctl_monitor.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -pid=`ps aux | egrep "[s]ystemctl_monitor.py" | awk '{ print $2 }'` && kill $pid \ No newline at end of file diff --git a/src/clmcagent/systemctl_monitor.py b/src/clmcagent/systemctl_monitor.py deleted file mode 100644 index db2cefc..0000000 --- a/src/clmcagent/systemctl_monitor.py +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/python3 -import argparse -import subprocess -import logging -import time -import urllib.parse -from config_collector import ConfigCollector, InfluxWriter -from influxdb import InfluxDBClient - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger() - -class SystemctlMonitor: - ACTIVE_STATE_KEY='ActiveState' - SUBSTATE_KEY='SubState' - LOAD_STATE_KEY='LoadState' - - def __init__(self, service_name, sample_rate, agg_period, hostname, port, database): - self.service_name = service_name - self.writer = InfluxWriter(hostname, port, database) - self.collection_thread = ConfigCollector(self.get_systemctl_sample, self.writer.write, self.service_name, sample_rate, agg_period) - - def start(self): - self.collection_thread.start() - - def stop(self): - self.collection_thread.stop() - - def get_current_measurement(self): - return self.collection_thread.current_measurement - - def get_systemctl_sample(self): - return (self.get_systemctl_status(self.service_name), time.time()) - - def get_systemctl_status(self, 
service_name): - load_state = 'unknown' - active_state = 'unknown' - sub_state = 'unknown' - - cmd = "systemctl show {0}".format(service_name) - proc = subprocess.Popen([cmd], stdout=subprocess.PIPE, shell=True) - out, err = proc.communicate() - if out: out = out.decode('ascii') - if err: err = err.decode('ascii') - logger.debug("Return code = {0}".format(proc.returncode)) - if proc.returncode != 0: - logger.error("Could not get status for service {0}, {1}".format(service_name, err)) - raise Exception("Could not get status for service {0}, {1}".format(service_name, err)) - - for line in iter(out.splitlines()): - parts = line.split('=') - if parts[0] == SystemctlMonitor.LOAD_STATE_KEY: - load_state = parts[1] - elif parts[0] == SystemctlMonitor.ACTIVE_STATE_KEY: - active_state = parts[1] - elif parts[0] == SystemctlMonitor.SUBSTATE_KEY: - sub_state = parts[1] - return load_state + "." + active_state + "." + sub_state - - -def main(): - - parser = argparse.ArgumentParser(description='systemctl state monitor') - parser.add_argument('-service', help='service name', required=True) - parser.add_argument('-rate', help='sample rate', required=True) - parser.add_argument('-agg', help='aggregation period', required=True) - parser.add_argument('-host', help='telegraf hostname', required=True) - parser.add_argument('-port', help='telegraf port', required=True) - parser.add_argument('-db', help='database name', required=True) - parser.add_argument('-debug', '--debug', action='store_true') - args = parser.parse_args() - print("Starting SystemctlMonitor : {0}, {1}, {2}, {3}, {4}, {5}".format(args.service, args.rate, args.agg, args.host, args.port, args.db)) - - if args.debug: - print("Setting logging level to DEBUG") - logger.setLevel(logging.DEBUG) - else: - logger.setLevel(logging.INFO) - - mon = SystemctlMonitor(args.service, int(args.rate), int(args.agg), args.host, int(args.port), args.db) - mon.start() - -if __name__== "__main__": - main() - - diff --git a/src/clmcwebservice/.coveragerc b/src/service/.coveragerc similarity index 100% rename from src/clmcwebservice/.coveragerc rename to src/service/.coveragerc diff --git a/src/clmcwebservice/MANIFEST.in b/src/service/MANIFEST.in similarity index 100% rename from src/clmcwebservice/MANIFEST.in rename to src/service/MANIFEST.in diff --git a/src/clmcwebservice/clmcservice/__init__.py b/src/service/clmcservice/__init__.py similarity index 100% rename from src/clmcwebservice/clmcservice/__init__.py rename to src/service/clmcservice/__init__.py diff --git a/src/clmcwebservice/clmcservice/aggregator.py b/src/service/clmcservice/aggregator.py similarity index 99% rename from src/clmcwebservice/clmcservice/aggregator.py rename to src/service/clmcservice/aggregator.py index 18a3317..ab855d9 100644 --- a/src/clmcwebservice/clmcservice/aggregator.py +++ b/src/service/clmcservice/aggregator.py @@ -37,7 +37,7 @@ class Aggregator(object): """ REPORT_PERIOD = 5 # default report period is 5s, that is every 5 seconds the mean delay values for the last 5 seconds are aggregated - DATABASE = 'E2EMetrics' # default database the aggregator uses + DATABASE = 'CLMCMetrics' # default database the aggregator uses DATABASE_URL = 'http://203.0.113.100:8086' # default database URL the aggregator uses RETRY_PERIOD = 5 # number of seconds to wait before retrying connection/posting data to Influx diff --git a/src/clmcwebservice/clmcservice/tests.py b/src/service/clmcservice/tests.py similarity index 94% rename from src/clmcwebservice/clmcservice/tests.py rename to 
src/service/clmcservice/tests.py index eee634b..581608e 100644 --- a/src/clmcwebservice/clmcservice/tests.py +++ b/src/service/clmcservice/tests.py @@ -43,7 +43,7 @@ class TestAggregatorAPI(object): self.config = testing.setUp() self.config.add_settings({'aggregator_running': False, 'malformed': False, 'aggregator_report_period': 5, - 'aggregator_database_name': 'E2EMetrics', 'aggregator_database_url': "http://172.40.231.51:8086"}) + 'aggregator_database_name': 'CLMCMetrics', 'aggregator_database_url': "http://172.40.231.51:8086"}) yield @@ -57,31 +57,31 @@ class TestAggregatorAPI(object): from clmcservice.views import AggregatorConfig # nested import so that importing the class view is part of the test itself assert self.config.get_settings().get('aggregator_report_period') == 5, "Initial report period is 5 seconds." - assert self.config.get_settings().get('aggregator_database_name') == 'E2EMetrics', "Initial database name the aggregator uses is E2EMetrics." + assert self.config.get_settings().get('aggregator_database_name') == 'CLMCMetrics', "Initial database name the aggregator uses is CLMCMetrics." assert self.config.get_settings().get('aggregator_database_url') == "http://172.40.231.51:8086", "Initial aggregator url is http://172.40.231.51:8086" request = testing.DummyRequest() response = AggregatorConfig(request).get() assert response == {'aggregator_report_period': 5, - 'aggregator_database_name': 'E2EMetrics', + 'aggregator_database_name': 'CLMCMetrics', 'aggregator_database_url': "http://172.40.231.51:8086"}, "Response must be a dictionary representing a JSON object with the correct configuration data of the aggregator." assert self.config.get_settings().get('aggregator_report_period') == 5, "A GET request must not modify the aggregator configuration data." - assert self.config.get_settings().get('aggregator_database_name') == 'E2EMetrics', "A GET request must not modify the aggregator configuration data." + assert self.config.get_settings().get('aggregator_database_name') == 'CLMCMetrics', "A GET request must not modify the aggregator configuration data." assert self.config.get_settings().get('aggregator_database_url') == "http://172.40.231.51:8086", "A GET request must not modify the aggregator configuration data." 
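The property these assertions pin down is that a GET against /aggregator/config is a pure read. Checked externally against a running service, the same contract might look like the following sketch; the port is an assumption carried over from the install script:

```python
# Hypothetical external check of the property the unit test asserts:
# a GET returns the stored configuration and leaves it untouched.
import requests

url = "http://localhost:9080/aggregator/config"
before = requests.get(url).json()
after = requests.get(url).json()
assert before == after  # a GET request must not modify the configuration
```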
@pytest.mark.parametrize("input_body, output_value", [ ('{"aggregator_report_period": 10, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://171.40.231.51:8086"}', {'aggregator_report_period': 10, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "http://171.40.231.51:8086"}), - ('{"aggregator_report_period": 15, "aggregator_database_name": "E2EMetrics", "aggregator_database_url": "http://172.50.231.51:8086"}', - {'aggregator_report_period': 15, 'aggregator_database_name': "E2EMetrics", 'aggregator_database_url': "http://172.50.231.51:8086"}), + ('{"aggregator_report_period": 15, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://172.50.231.51:8086"}', + {'aggregator_report_period': 15, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "http://172.50.231.51:8086"}), ('{"aggregator_report_period": 20, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://172.60.231.51:8086"}', {'aggregator_report_period': 20, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "http://172.60.231.51:8086"}), ('{"aggregator_report_period": 25, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://172.60.231.51:8086"}', {'aggregator_report_period': 25, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "http://172.60.231.51:8086"}), - ('{"aggregator_report_period": 200, "aggregator_database_name": "E2EMetrics", "aggregator_database_url": "https://172.50.231.51:8086"}', - {'aggregator_report_period': 200, 'aggregator_database_name': "E2EMetrics", 'aggregator_database_url': "https://172.50.231.51:8086"}), + ('{"aggregator_report_period": 200, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "https://172.50.231.51:8086"}', + {'aggregator_report_period': 200, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "https://172.50.231.51:8086"}), ('{"aggregator_report_period": 150, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "https://localhost:8086"}', {'aggregator_report_period': 150, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "https://localhost:8086"}), ("{aggregator_report_period: 2hb5, aggregator_database_name: CLMCMetrics, aggregator_database_url: http://172.60.231.51:8086}", None), @@ -103,7 +103,7 @@ class TestAggregatorAPI(object): assert not self.config.get_settings().get(RUNNING_FLAG), "Initially aggregator is not running." assert self.config.get_settings().get('aggregator_report_period') == 5, "Initial report period is 5 seconds." - assert self.config.get_settings().get('aggregator_database_name') == 'E2EMetrics', "Initial database name the aggregator uses is E2EMetrics." + assert self.config.get_settings().get('aggregator_database_name') == 'CLMCMetrics', "Initial database name the aggregator uses is CLMCMetrics." assert self.config.get_settings().get('aggregator_database_url') == "http://172.40.231.51:8086", "Initial aggregator url is http://172.40.231.51:8086" request = testing.DummyRequest() @@ -298,7 +298,7 @@ class TestAggregatorAPI(object): assert not self.config.get_settings().get(MALFORMED_FLAG), "Initially aggregator is not in a malformed state" assert self.config.get_settings().get(PROCESS_ATTRIBUTE) is None, "Initially no aggregator process is running." assert self.config.get_settings().get('aggregator_report_period') == 5, "Initial report period is 5 seconds." 
- assert self.config.get_settings().get('aggregator_database_name') == 'E2EMetrics', "Initial database name the aggregator uses is E2EMetrics." + assert self.config.get_settings().get('aggregator_database_name') == 'CLMCMetrics', "Initial database name the aggregator uses is CLMCMetrics." assert self.config.get_settings().get('aggregator_database_url') == "http://172.40.231.51:8086", "Initial aggregator url is http://172.40.231.51:8086" # start the aggregator with the default configuration @@ -310,8 +310,8 @@ class TestAggregatorAPI(object): assert response == {RUNNING_FLAG: True}, "The aggregator should have been started." # update the configuration of the aggregator while it is running - config_body = '{"aggregator_report_period": 15, "aggregator_database_name": "E2EMetrics", "aggregator_database_url": "http://172.50.231.51:8086"}' - output_body = {'aggregator_report_period': 15, 'aggregator_database_name': "E2EMetrics", 'aggregator_database_url': "http://172.50.231.51:8086", 'malformed': True, + config_body = '{"aggregator_report_period": 15, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://172.50.231.51:8086"}' + output_body = {'aggregator_report_period': 15, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "http://172.50.231.51:8086", 'malformed': True, 'comment': 'Aggregator is running in a malformed state - it uses an old version of the configuration. Please, restart it so that the updated configuration is used.'} request = testing.DummyRequest() request.body = config_body.encode(request.charset) @@ -341,8 +341,8 @@ class TestAggregatorAPI(object): assert self.config.get_settings().get(PROCESS_ATTRIBUTE) is not None, "The aggregator should have been restarted." # update the configuration again while the aggregator is running - config_body = '{"aggregator_report_period": 30, "aggregator_database_name": "E2EMetrics", "aggregator_database_url": "http://172.50.231.51:8086"}' - output_body = {'aggregator_report_period': 30, 'aggregator_database_name': "E2EMetrics", 'aggregator_database_url': "http://172.50.231.51:8086", 'malformed': True, + config_body = '{"aggregator_report_period": 30, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://172.50.231.51:8086"}' + output_body = {'aggregator_report_period': 30, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "http://172.50.231.51:8086", 'malformed': True, 'comment': 'Aggregator is running in a malformed state - it uses an old version of the configuration. 
Please, restart it so that the updated configuration is used.'} request = testing.DummyRequest() request.body = config_body.encode(request.charset) diff --git a/src/clmcwebservice/clmcservice/utilities.py b/src/service/clmcservice/utilities.py similarity index 100% rename from src/clmcwebservice/clmcservice/utilities.py rename to src/service/clmcservice/utilities.py diff --git a/src/clmcwebservice/clmcservice/views.py b/src/service/clmcservice/views.py similarity index 100% rename from src/clmcwebservice/clmcservice/views.py rename to src/service/clmcservice/views.py diff --git a/src/clmcwebservice/development.ini b/src/service/development.ini similarity index 96% rename from src/clmcwebservice/development.ini rename to src/service/development.ini index 4b5384d..269aea6 100644 --- a/src/clmcwebservice/development.ini +++ b/src/service/development.ini @@ -14,7 +14,7 @@ pyramid.default_locale_name = en pyramid.includes = pyramid_debugtoolbar aggregator_running = false aggregator_report_period = 5 -aggregator_database_name = E2EMetrics +aggregator_database_name = CLMCMetrics aggregator_database_url = http://172.40.231.51:8086 # By default, the toolbar only appears for clients from IP addresses diff --git a/src/clmcwebservice/production.ini b/src/service/production.ini similarity index 100% rename from src/clmcwebservice/production.ini rename to src/service/production.ini diff --git a/src/clmcwebservice/pytest.ini b/src/service/pytest.ini similarity index 100% rename from src/clmcwebservice/pytest.ini rename to src/service/pytest.ini diff --git a/src/clmcwebservice/setup.py b/src/service/setup.py similarity index 100% rename from src/clmcwebservice/setup.py rename to src/service/setup.py diff --git a/src/clmcwebservice/tox.ini b/src/service/tox.ini similarity index 100% rename from src/clmcwebservice/tox.ini rename to src/service/tox.ini diff --git a/MANIFEST.in b/src/test/MANIFEST.in similarity index 100% rename from MANIFEST.in rename to src/test/MANIFEST.in diff --git a/clmctest/__init__.py b/src/test/clmctest/__init__.py similarity index 100% rename from clmctest/__init__.py rename to src/test/clmctest/__init__.py diff --git a/clmctest/e2e_response_time/rspec.yml b/src/test/clmctest/e2e_response_time/rspec.yml similarity index 100% rename from clmctest/e2e_response_time/rspec.yml rename to src/test/clmctest/e2e_response_time/rspec.yml diff --git a/clmctest/inputs/__init__.py b/src/test/clmctest/inputs/__init__.py similarity index 100% rename from clmctest/inputs/__init__.py rename to src/test/clmctest/inputs/__init__.py diff --git a/clmctest/inputs/conftest.py b/src/test/clmctest/inputs/conftest.py similarity index 100% rename from clmctest/inputs/conftest.py rename to src/test/clmctest/inputs/conftest.py diff --git a/clmctest/inputs/rspec.yml b/src/test/clmctest/inputs/rspec.yml similarity index 100% rename from clmctest/inputs/rspec.yml rename to src/test/clmctest/inputs/rspec.yml diff --git a/clmctest/inputs/test_config_collector.py b/src/test/clmctest/inputs/test_config_collector.py similarity index 100% rename from clmctest/inputs/test_config_collector.py rename to src/test/clmctest/inputs/test_config_collector.py diff --git a/clmctest/inputs/test_rspec.py b/src/test/clmctest/inputs/test_rspec.py similarity index 100% rename from clmctest/inputs/test_rspec.py rename to src/test/clmctest/inputs/test_rspec.py diff --git a/clmctest/inputs/test_systemctl_mon.py b/src/test/clmctest/inputs/test_systemctl_mon.py similarity index 100% rename from clmctest/inputs/test_systemctl_mon.py rename 
to src/test/clmctest/inputs/test_systemctl_mon.py diff --git a/clmctest/inputs/test_telegraf_agents.py b/src/test/clmctest/inputs/test_telegraf_agents.py similarity index 100% rename from clmctest/inputs/test_telegraf_agents.py rename to src/test/clmctest/inputs/test_telegraf_agents.py diff --git a/clmctest/monitoring/E2ESim.py b/src/test/clmctest/monitoring/E2ESim.py similarity index 100% rename from clmctest/monitoring/E2ESim.py rename to src/test/clmctest/monitoring/E2ESim.py diff --git a/clmctest/monitoring/E2ETestAggregatorThread.py b/src/test/clmctest/monitoring/E2ETestAggregatorThread.py similarity index 97% rename from clmctest/monitoring/E2ETestAggregatorThread.py rename to src/test/clmctest/monitoring/E2ETestAggregatorThread.py index 991c33d..a6c0097 100644 --- a/clmctest/monitoring/E2ETestAggregatorThread.py +++ b/src/test/clmctest/monitoring/E2ETestAggregatorThread.py @@ -30,7 +30,7 @@ from clmcservice.aggregator import Aggregator class TestAggregator(Thread): REPORT_PERIOD = 5 # currently, report period is 5s, that is every 5 seconds the mean delay values for the last 5 seconds are aggregated - DATABASE = 'E2EMetrics' # default database the aggregator uses + DATABASE = 'CLMCMetrics' # default database the aggregator uses DATABASE_URL = 'http://203.0.113.100:8086' # default database URL the aggregator uses def __init__(self, database=DATABASE, database_url=DATABASE_URL, report_period=REPORT_PERIOD): diff --git a/clmctest/monitoring/LineProtocolGenerator.py b/src/test/clmctest/monitoring/LineProtocolGenerator.py similarity index 100% rename from clmctest/monitoring/LineProtocolGenerator.py rename to src/test/clmctest/monitoring/LineProtocolGenerator.py diff --git a/clmctest/monitoring/StreamingSim.py b/src/test/clmctest/monitoring/StreamingSim.py similarity index 100% rename from clmctest/monitoring/StreamingSim.py rename to src/test/clmctest/monitoring/StreamingSim.py diff --git a/clmctest/monitoring/__init__.py b/src/test/clmctest/monitoring/__init__.py similarity index 100% rename from clmctest/monitoring/__init__.py rename to src/test/clmctest/monitoring/__init__.py diff --git a/clmctest/monitoring/conftest.py b/src/test/clmctest/monitoring/conftest.py similarity index 100% rename from clmctest/monitoring/conftest.py rename to src/test/clmctest/monitoring/conftest.py diff --git a/clmctest/monitoring/rspec.yml b/src/test/clmctest/monitoring/rspec.yml similarity index 100% rename from clmctest/monitoring/rspec.yml rename to src/test/clmctest/monitoring/rspec.yml diff --git a/clmctest/monitoring/test_e2eresults.py b/src/test/clmctest/monitoring/test_e2eresults.py similarity index 94% rename from clmctest/monitoring/test_e2eresults.py rename to src/test/clmctest/monitoring/test_e2eresults.py index 0b8c8d1..c750606 100644 --- a/clmctest/monitoring/test_e2eresults.py +++ b/src/test/clmctest/monitoring/test_e2eresults.py @@ -62,15 +62,15 @@ class TestE2ESimulation(object): e2e_aggregator.stop() @pytest.mark.parametrize("query, expected_result", [ - ('SELECT count(*) FROM "E2EMetrics"."autogen"."network_delays"', + ('SELECT count(*) FROM "CLMCMetrics"."autogen"."network_delays"', {"time": "1970-01-01T00:00:00Z", "count_latency": 120, "count_bandwidth": 120}), - ('SELECT count(*) FROM "E2EMetrics"."autogen"."service_delays"', + ('SELECT count(*) FROM "CLMCMetrics"."autogen"."service_delays"', {"time": "1970-01-01T00:00:00Z", "count_response_time": 24, "count_request_size": 24, "count_response_size": 24}), - ('SELECT count(*) FROM "E2EMetrics"."autogen"."e2e_delays"', + ('SELECT 
count(*) FROM "CLMCMetrics"."autogen"."e2e_delays"', {"time": "1970-01-01T00:00:00Z", "count_delay_forward": 38, "count_delay_reverse": 38, "count_delay_service": 38, "count_avg_request_size": 38, "count_avg_response_size": 38, "count_avg_bandwidth": 38}), - ('SELECT mean(*) FROM "E2EMetrics"."autogen"."e2e_delays"', + ('SELECT mean(*) FROM "CLMCMetrics"."autogen"."e2e_delays"', {"time": "1970-01-01T00:00:00Z", "mean_delay_forward": 8.010964912280702, "mean_delay_reverse": 12.881578947368423, "mean_delay_service": 23.42105263157895, 'mean_avg_request_size': 10485760, 'mean_avg_response_size': 1024, 'mean_avg_bandwidth': 104857600}), ]) diff --git a/clmctest/monitoring/test_rspec.py b/src/test/clmctest/monitoring/test_rspec.py similarity index 100% rename from clmctest/monitoring/test_rspec.py rename to src/test/clmctest/monitoring/test_rspec.py diff --git a/clmctest/monitoring/test_simresults.py b/src/test/clmctest/monitoring/test_simresults.py similarity index 100% rename from clmctest/monitoring/test_simresults.py rename to src/test/clmctest/monitoring/test_simresults.py diff --git a/clmctest/scripts/__init__.py b/src/test/clmctest/scripts/__init__.py similarity index 100% rename from clmctest/scripts/__init__.py rename to src/test/clmctest/scripts/__init__.py diff --git a/clmctest/scripts/rspec.yml b/src/test/clmctest/scripts/rspec.yml similarity index 100% rename from clmctest/scripts/rspec.yml rename to src/test/clmctest/scripts/rspec.yml diff --git a/clmctest/scripts/test_config_telegraf.py b/src/test/clmctest/scripts/test_config_telegraf.py similarity index 94% rename from clmctest/scripts/test_config_telegraf.py rename to src/test/clmctest/scripts/test_config_telegraf.py index 72ed589..f04471d 100644 --- a/clmctest/scripts/test_config_telegraf.py +++ b/src/test/clmctest/scripts/test_config_telegraf.py @@ -35,6 +35,7 @@ def test_write_telegraf_conf(): SF_ID="streaming_service" SF_ID_INSTANCE="streaming_service_instance" IP_ENDPOINT_ID="endpoint" + SR_ID="sr_id" INFLUXDB_URL="http://172.29.236.10" DATABASE_NAME="experimentation_database" @@ -61,7 +62,7 @@ def test_write_telegraf_conf(): assert code == 1, "Failed to return error on incorrect arguments : " + str(code) + ", cmd=" + cmd # run template relacement script with all arguments - cmd = 'sudo /vagrant/scripts/clmc-agent/configure.sh ' + LOCATION + ' ' + SFC_ID + ' ' + SFC_ID_INSTANCE + ' ' + SF_ID + ' ' + SF_ID_INSTANCE + ' ' + IP_ENDPOINT_ID + ' ' + INFLUXDB_URL + ' ' + DATABASE_NAME + cmd = 'sudo /vagrant/scripts/clmc-agent/configure.sh ' + LOCATION + ' ' + SFC_ID + ' ' + SFC_ID_INSTANCE + ' ' + SF_ID + ' ' + SF_ID_INSTANCE + ' ' + IP_ENDPOINT_ID + ' ' + SR_ID + ' ' + INFLUXDB_URL + ' ' + DATABASE_NAME (out, err, code) = run_command(cmd) assert code == 0, "Configure command returned error, output=" + str(out) + ", cmd=" + cmd @@ -75,7 +76,8 @@ def test_write_telegraf_conf(): assert lines.find(SFC_ID_INSTANCE), "Cannot find sfc_id_instance" assert lines.find(SF_ID), "Cannot find sfc_id" assert lines.find(SF_ID_INSTANCE), "Cannot find sf_id_instance" - assert lines.find(IP_ENDPOINT_ID), "Cannot find location" + assert lines.find(IP_ENDPOINT_ID), "Cannot find endpoint id" + assert lines.find(SR_ID), "Cannot find sr_id" except FileNotFoundError: assert False, "Telegraf general conf file not found, " + TELEGRAF_GENERAL_CONF_FILE diff --git a/clmctest/services/apache/install.sh b/src/test/clmctest/services/apache/install.sh old mode 100755 new mode 100644 similarity index 100% rename from clmctest/services/apache/install.sh rename 
to src/test/clmctest/services/apache/install.sh diff --git a/clmctest/services/apache/telegraf_apache.conf b/src/test/clmctest/services/apache/telegraf_apache.conf similarity index 100% rename from clmctest/services/apache/telegraf_apache.conf rename to src/test/clmctest/services/apache/telegraf_apache.conf diff --git a/clmctest/services/ffmpeg/install.sh b/src/test/clmctest/services/ffmpeg/install.sh old mode 100755 new mode 100644 similarity index 100% rename from clmctest/services/ffmpeg/install.sh rename to src/test/clmctest/services/ffmpeg/install.sh diff --git a/clmctest/services/ffmpeg/telegraf_ffmpeg.conf b/src/test/clmctest/services/ffmpeg/telegraf_ffmpeg.conf similarity index 100% rename from clmctest/services/ffmpeg/telegraf_ffmpeg.conf rename to src/test/clmctest/services/ffmpeg/telegraf_ffmpeg.conf diff --git a/clmctest/services/ffmpeg/transcode.sh b/src/test/clmctest/services/ffmpeg/transcode.sh old mode 100755 new mode 100644 similarity index 100% rename from clmctest/services/ffmpeg/transcode.sh rename to src/test/clmctest/services/ffmpeg/transcode.sh diff --git a/clmctest/services/host/install.sh b/src/test/clmctest/services/host/install.sh old mode 100755 new mode 100644 similarity index 100% rename from clmctest/services/host/install.sh rename to src/test/clmctest/services/host/install.sh diff --git a/clmctest/services/host/telegraf_host.conf b/src/test/clmctest/services/host/telegraf_host.conf similarity index 100% rename from clmctest/services/host/telegraf_host.conf rename to src/test/clmctest/services/host/telegraf_host.conf diff --git a/clmctest/services/ipendpoint/install.sh b/src/test/clmctest/services/ipendpoint/install.sh old mode 100755 new mode 100644 similarity index 100% rename from clmctest/services/ipendpoint/install.sh rename to src/test/clmctest/services/ipendpoint/install.sh diff --git a/clmctest/services/ipendpoint/telegraf_ipendpoint.conf b/src/test/clmctest/services/ipendpoint/telegraf_ipendpoint.conf similarity index 100% rename from clmctest/services/ipendpoint/telegraf_ipendpoint.conf rename to src/test/clmctest/services/ipendpoint/telegraf_ipendpoint.conf diff --git a/clmctest/services/loadtest-streaming/install.sh b/src/test/clmctest/services/loadtest-streaming/install.sh old mode 100755 new mode 100644 similarity index 100% rename from clmctest/services/loadtest-streaming/install.sh rename to src/test/clmctest/services/loadtest-streaming/install.sh diff --git a/clmctest/services/loadtest-streaming/telegraf_loadtest_streaming.conf b/src/test/clmctest/services/loadtest-streaming/telegraf_loadtest_streaming.conf similarity index 100% rename from clmctest/services/loadtest-streaming/telegraf_loadtest_streaming.conf rename to src/test/clmctest/services/loadtest-streaming/telegraf_loadtest_streaming.conf diff --git a/clmctest/services/minio/install.sh b/src/test/clmctest/services/minio/install.sh similarity index 100% rename from clmctest/services/minio/install.sh rename to src/test/clmctest/services/minio/install.sh diff --git a/clmctest/services/minio/minio.conf b/src/test/clmctest/services/minio/minio.conf similarity index 100% rename from clmctest/services/minio/minio.conf rename to src/test/clmctest/services/minio/minio.conf diff --git a/clmctest/services/minio/telegraf_minio.conf b/src/test/clmctest/services/minio/telegraf_minio.conf similarity index 100% rename from clmctest/services/minio/telegraf_minio.conf rename to src/test/clmctest/services/minio/telegraf_minio.conf diff --git a/clmctest/services/mongo/install.sh 
b/src/test/clmctest/services/mongo/install.sh old mode 100755 new mode 100644 similarity index 100% rename from clmctest/services/mongo/install.sh rename to src/test/clmctest/services/mongo/install.sh diff --git a/clmctest/services/mongo/telegraf_mongo.conf b/src/test/clmctest/services/mongo/telegraf_mongo.conf similarity index 100% rename from clmctest/services/mongo/telegraf_mongo.conf rename to src/test/clmctest/services/mongo/telegraf_mongo.conf diff --git a/clmctest/services/nginx/install.sh b/src/test/clmctest/services/nginx/install.sh old mode 100755 new mode 100644 similarity index 100% rename from clmctest/services/nginx/install.sh rename to src/test/clmctest/services/nginx/install.sh diff --git a/clmctest/services/nginx/nginx.conf b/src/test/clmctest/services/nginx/nginx.conf similarity index 100% rename from clmctest/services/nginx/nginx.conf rename to src/test/clmctest/services/nginx/nginx.conf diff --git a/clmctest/services/nginx/telegraf_nginx.conf b/src/test/clmctest/services/nginx/telegraf_nginx.conf similarity index 100% rename from clmctest/services/nginx/telegraf_nginx.conf rename to src/test/clmctest/services/nginx/telegraf_nginx.conf diff --git a/clmctest/services/pytest/install.sh b/src/test/clmctest/services/pytest/install.sh old mode 100755 new mode 100644 similarity index 100% rename from clmctest/services/pytest/install.sh rename to src/test/clmctest/services/pytest/install.sh diff --git a/clmctest/services/vlc/install.sh b/src/test/clmctest/services/vlc/install.sh old mode 100755 new mode 100644 similarity index 100% rename from clmctest/services/vlc/install.sh rename to src/test/clmctest/services/vlc/install.sh diff --git a/setup.py b/src/test/setup.py similarity index 97% rename from setup.py rename to src/test/setup.py index 191d3db..78c1628 100644 --- a/setup.py +++ b/src/test/setup.py @@ -42,7 +42,7 @@ def get_version(fname): setup( name = "clmctest", - version = get_version("clmctest/_version.py"), + version = get_version("_version.py"), author = "Michael Boniface", author_email = "mjb@it-innovation.soton.ac.uk", description = "FLAME CLMC Test Module", -- GitLab
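A closing note on the relocated setup.py: get_version is now called with a path relative to src/test rather than to the repository root. Its implementation is not part of this patch; the sketch below is only a plausible reading of the call site, not the file's actual contents:

```python
# Hypothetical sketch of a get_version helper consistent with the call site
# in src/test/setup.py; the real implementation is not shown in this patch.
import re

def get_version(fname):
    # expects the file to contain a line such as:  __version__ = "1.2.3"
    with open(fname) as f:
        match = re.search(r'__version__\s*=\s*["\']([^"\']+)["\']', f.read())
    if match is None:
        raise RuntimeError("Unable to find version string in {0}".format(fname))
    return match.group(1)
```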