diff --git a/README.md b/README.md index 4739ab358961acac0889245b2c1e4e4b60d953e2..f12d18fc94d7d85dd6bb512159686252ad4a514d 100644 --- a/README.md +++ b/README.md @@ -91,15 +91,19 @@ SSH into the CLMC server `vagrant --fixture=streaming-sim -- ssh clmc-service` +Then go to the 'vagrant' directory. + +`cd /vagrant` + The next step is to generate the test data, which could be done in two ways. First option is to run a python script to generate the test data sets -`python3 /vagrant/test/streaming-sim/StreamingSim.py` +`python3 test/streaming-sim/StreamingSim.py` This script could also be used to clear the generated data by using the '-c' option -`python3 /vagrant/test/streaming-sim/StreamingSim.py -c` +`python3 test/streaming-sim/StreamingSim.py -c` The second option is to directly run the testing module, which will detect if the data was generated, and if not, will automatically generate the data before executing the tests. Keep in mind that if the test data is being generated using this way, a 10 seconds timeout @@ -108,7 +112,7 @@ using the first option, only the tests would be executed. The command for running the testing module is -`pytest -s /vagrant/test/streaming-sim/test_simresults.py` +`pytest -s test/streaming-sim/test_simresults.py` The `-s` option in the command is used to output prints used in the test code and is, therefore, optional. diff --git a/test/services/loadtest-streaming/telegraf_template.conf b/test/services/loadtest-streaming/telegraf_loadtest_streaming.conf similarity index 100% rename from test/services/loadtest-streaming/telegraf_template.conf rename to test/services/loadtest-streaming/telegraf_loadtest_streaming.conf diff --git a/test/streaming-sim/conftest.py b/test/streaming-sim/conftest.py index de3ebaef1ef1c53b3471690b8a4f98af766266e1..a1b0c145cc890a2e3dca9a2bcca1ac954f8d55d5 100644 --- a/test/streaming-sim/conftest.py +++ b/test/streaming-sim/conftest.py @@ -2,13 +2,31 @@ import pytest import yaml +from influxdb import InfluxDBClient -@pytest.fixture(scope="module", - params=[{'config1': {'rspec': 'test/streaming-sim/rspec.yml', 'id': 'myid'}}]) + +@pytest.fixture(scope="module", params=[{'config': {'rspec': 'test/streaming-sim/rspec.yml'}}]) def streaming_sim_config(request): - """Returns the service configuration deployed for the streaming simulation test. In future this needs to be a parameterised fixture shared with other rspec.yml based tests""" - print(request.param['config1']['rspec']) - print(request.param['config1']['id']) - with open(request.param['config1']['rspec'], 'r') as stream: + """ + Reads the service configuration deployed for the streaming simulation test. 
+ + :param request: access the parameters of the fixture + :return: the python object representing the read YAML file + """ + + with open(request.param['config']['rspec'], 'r') as stream: data_loaded = yaml.load(stream) - return data_loaded \ No newline at end of file + return data_loaded + + +@pytest.fixture(params=[{'database': 'CLMCMetrics'}], scope='module') +def get_db_client(streaming_sim_config, request): + """ + Creates an Influx DB client for the CLMC metrics database + + :param streaming_sim_config: the fixture returning the yaml configuration + :param request: access the parameters of the fixture + :return: the created Influx DB client + """ + + return InfluxDBClient(host=streaming_sim_config['hosts'][0]['ip_address'], port=8086, database=request.param['database'], timeout=10) diff --git a/test/streaming-sim/test_rspec.py b/test/streaming-sim/test_rspec.py index efecd68bf0eac6c89b23320453d0b5425edc0c67..ecce587eab36aab6873b6c10c1c3924bcee93717 100644 --- a/test/streaming-sim/test_rspec.py +++ b/test/streaming-sim/test_rspec.py @@ -1,17 +1,40 @@ #!/usr/bin/python3 +from subprocess import run +from platform import system import pytest -import os -def test_service_names(streaming_sim_config): - print(streaming_sim_config['hosts'][0]['name']) - assert streaming_sim_config['hosts'][0]['name'] == 'clmc-service' - assert streaming_sim_config['hosts'][1]['name'] == 'ipendpoint1' - assert streaming_sim_config['hosts'][2]['name'] == 'ipendpoint2' + +@pytest.mark.parametrize("service_name", [ + 'clmc-service', + 'ipendpoint1', + 'ipendpoint2' +]) +def test_service_names(streaming_sim_config, service_name): + """ + Tests the service names in the configuration. + + :param streaming_sim_config: the configuration fixture collected from conftest.py + :param service_name the service name to test + """ + + assert any(s['name'] == service_name for s in streaming_sim_config['hosts']), "{0} not in list of hosts".format(service_name) + print("\nSuccessfully passed configuration test for service name {0}\n".format(service_name)) + def test_ping(streaming_sim_config): - """This test will only run on linux due to using os.system library""" - for x in streaming_sim_config['hosts']: - print(x['ip_address']) - response = os.system("ping -c 1 " + x['ip_address']) - assert response == 0 \ No newline at end of file + """ + Pings each service to test for liveliness + + :param streaming_sim_config: the configuration fixture collected from conftest.py + """ + + print("\n") # blank line printed for formatting purposes + + ping_count = 1 + system_dependent_param = "-n" if system().lower() == "windows" else "-c" + + for service in streaming_sim_config['hosts']: + command = ["ping", system_dependent_param, str(ping_count), service['ip_address']] + assert run(command).returncode == 0, "Service ping test failed for {0} with ip address {1}".format(service['name'], service['ip_address']) + print("\nSuccessfully passed ping test for service: {0}\n".format(service['name'])) diff --git a/test/streaming-sim/test_simresults.py b/test/streaming-sim/test_simresults.py index 9c87987cc9b2aa70ae8d555b0f65da72a9e97a08..ffdd8a949b2cbb7408fe40e3fbcbc8d2fa04d3b1 100644 --- a/test/streaming-sim/test_simresults.py +++ b/test/streaming-sim/test_simresults.py @@ -1,6 +1,5 @@ #!/usr/bin/python3 -from influxdb import InfluxDBClient import pytest from StreamingSim import run_simulation_fixture @@ -11,15 +10,15 @@ class TestSimulation(object): """ @pytest.mark.parametrize("query, expected_result", [ - ("SELECT count(*) FROM 
\"CLMCMetrics\".\"autogen\".\"cpu_usage\"", + ('SELECT count(*) FROM "CLMCMetrics"."autogen"."cpu_usage"', {"time": "1970-01-01T00:00:00Z", "count_cpu_active_time": 7200, "count_cpu_idle_time": 7200, "count_cpu_usage": 7200}), - ("SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"ipendpoint_route\"", + ('SELECT count(*) FROM "CLMCMetrics"."autogen"."ipendpoint_route"', {"time": "1970-01-01T00:00:00Z", "count_http_requests_fqdn_m": 7200, "count_network_fqdn_latency": 7200}), - ("SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"mpegdash_service\"", + ('SELECT count(*) FROM "CLMCMetrics"."autogen"."mpegdash_service"', {"time": "1970-01-01T00:00:00Z", "count_avg_response_time": 7200, "count_peak_response_time": 7200, "count_requests": 7200}), - ("SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"net_port_io\"", + ('SELECT count(*) FROM "CLMCMetrics"."autogen"."net_port_io"', {"time": "1970-01-01T00:00:00Z", "count_RX_BYTES_PORT_M": 7200, "count_TX_BYTES_PORT_M": 7200}), - ("SELECT count(*) FROM \"CLMCMetrics\".\"autogen\".\"vm_res_alloc\"", + ('SELECT count(*) FROM "CLMCMetrics"."autogen"."vm_res_alloc"', {"time": "1970-01-01T00:00:00Z", "count_cpu": 12, "count_memory": 12, "count_storage": 12}) ]) def test_simulation(self, query, expected_result, get_db_client, run_simulation_fixture): @@ -28,6 +27,7 @@ class TestSimulation(object): :param query: the query to execute (value obtained from the pytest parameter decorator) :param expected_result: the result expected from executing the query (value obtained from the pytest parameter decorator) + :param get_db_client the import db client fixture - imported from contest.py :param run_simulation_fixture: the imported fixture to use to generate the testing data - the return value of the fixture is not needed in this case """ @@ -47,7 +47,3 @@ class TestSimulation(object): assert expected_result == actual_result, "Simulation test failure" print("Successfully passed test for the following query: {0}".format(query)) - - @pytest.fixture(params=[{'database': 'CLMCMetrics'}], scope='class') - def get_db_client(self, request): - return InfluxDBClient(host='localhost', port=8086, database=request.param['database'], timeout=10) diff --git a/test/streaming/conftest.py b/test/streaming/conftest.py index 4fe3bb9dc1298df611392594940a7efca95d2ddf..77e0f1d2d5f50a2a13d7918d24b155563a377436 100644 --- a/test/streaming/conftest.py +++ b/test/streaming/conftest.py @@ -3,9 +3,16 @@ import pytest import yaml -@pytest.fixture(scope="module") -def streaming_config(): - """Returns the service configuration deployed for the streaming test. In future this needs to be a parameterised fixture shared with other rspec.yml based tests""" - with open("test/streaming/rspec.yml", 'r') as stream: + +@pytest.fixture(scope="module", params=[{'config': {'rspec': '/vagrant/test/streaming/rspec.yml'}}]) +def streaming_config(request): + """ + Reads the service configuration deployed for the streaming simulation test. 
+
+    :param request: access the parameters of the fixture
+    :return: the python object representing the read YAML file
+    """
+
+    with open(request.param['config']['rspec'], 'r') as stream:
         data_loaded = yaml.load(stream)
-    return data_loaded
\ No newline at end of file
+    return data_loaded
diff --git a/test/streaming/manual.md b/test/streaming/manual.md
index 0b55f5b7d5a958fcb6828371aa9edc561bdf2f7e..69d7899001cb072a34676af65ad8bae62e44972a 100644
--- a/test/streaming/manual.md
+++ b/test/streaming/manual.md
@@ -25,33 +25,43 @@
 # CLMC Adaptive Streaming Test
 
-This test streams mpeg-dash video using the two apache servers monitored by Telegraf configured with a default apache plugin and a net_response plugin. The data is stored in the `clmc-service` using database `CLMCMetrics` and measurements `apache` and `net_response`
+This test streams mpeg-dash video using the two nginx servers monitored by Telegraf configured with a default apache plugin and a net_response plugin. The data is stored in the `clmc-service` using database `CLMCMetrics` and measurements `nginx` and `net_response`
 
 The following command brings up the services
 
-`vagrant --infra=streaming up`
+`vagrant --fixture=streaming up`
 
 * clmc-service: configured with influx, kapacitor, chornograf
-* apache1@DC1, apache2@DC2: configured with apache and a test video located at http://192.168.50.11:80/test_video/stream.mpd on the internal vbox network and at http://localhost:8081/test_video/stream.mpd if accessing from the host machine
+* nginx1@DC1, nginx2@DC2: configured with nginx and a test video located at http://192.168.50.11:80/test_video/stream.mpd on the internal vbox network and at http://localhost:8081/test_video/stream.mpd if accessing from the host machine
 
 ### Run the test set-up
 
-`vagrant --infra=streaming ssh clmc-service -- "sudo /vagrant/test/streaming/setup.sh /vagrant/test/streaming"`
+`vagrant --fixture=streaming ssh clmc-service -- "sudo /vagrant/test/streaming/setupCLMC.sh /vagrant/test/streaming"`
+`vagrant --fixture=streaming ssh nginx1 -- "sudo /vagrant/test/streaming/setupNGINX.sh"`
 
 ### Run the automated test
 
 To run the load test using the following command (here, the last parameter '15' refers to the number of VLC player clients to be launched):
 
-`vagrant --infra=streaming ssh loadtest-streaming -- "/vagrant/test/streaming/run.sh /home/ubuntu/test/streaming http://192.168.50.11/test_video/stream.mpd 15`
+`vagrant --fixture=streaming ssh loadtest-streaming -- "sudo /vagrant/test/streaming/run.sh /home/ubuntu/test/streaming http://192.168.50.11/test_video/stream.mpd 15"`
 
 This test currently just generates the load and does not have any assertions. It breaks at 1000.
 
+And then point your browser to the Chronograf dashboard:
 
-### Manual test
+`http://localhost:8888`
 
-And then point your browser to:
+### Run the automated PyTests
 
-`http://localhost:8888`
+SSH into the clmc-service VM:
+
+`vagrant --fixture=streaming ssh clmc-service`
+
+Run the automated tests written in pytest:
+
+`pytest -s /vagrant/test/streaming/`
+
+### Manual test
 
 ## Manual set-up of Chronograf's CLMC data source
 
@@ -75,7 +85,7 @@
 Start the VLC Player
 
 The test video is the FLAME project video and it can be viewed at the following location.
 
-`Enter the network URL: http://localhost:8081/test-video/stream.mpd for apache1 server`
+`Enter the network URL: http://localhost:8081/test_video/stream.mpd for nginx1 server`
 
 The video should play.
 
@@ -83,9 +93,9 @@ The video should play.
 Open Chronograph by entering the following URL into a browser on the host http://localhost:8888. Your CLMC data source, Kapacitor and demonstration dashboard should be ready for you to explore.
 
-Press the Data Explorer in the menu and select the apache measurement and create a query such as
+Press the Data Explorer in the menu and select the nginx measurement and create a query such as
 
-`SELECT mean("BytesPerSec") AS "mean_BytesPerSec" FROM "CLMCMetrics"."autogen"."apache" WHERE time > now() - 5m GROUP BY time(10s)`
+`SELECT mean("requests") AS "mean_requests" FROM "CLMCMetrics"."autogen"."nginx" WHERE time > now() - 1h GROUP BY time(10s)`
 
 ## KPI triggers
 
@@ -94,40 +104,38 @@ In this demonstrator an example KPI rule has been set up in Kapacitor which fire
 ```
 dbrp "CLMCMetrics"."autogen"
 
-// Apache 1 rule
+// Nginx 1 rule
 // -------------
-var a1Data = batch
-    |query(''' SELECT mean("ReqPerSec") AS "mean_RPS" FROM "CLMCMetrics"."autogen"."apache" WHERE "ipendpoint"='adaptive_streaming_I1_apache1' ''')
+var n1Data = batch
+    |query(''' SELECT mean("active") AS "mean_active" FROM "CLMCMetrics"."autogen"."nginx" WHERE "ipendpoint"='adaptive_streaming_I1_nginx1' ''')
         .period(5s)
         .every(5s)
 
-var a1Alert = a1Data
+var n1Alert = n1Data
     |alert()
-        .id('{{ .Name }}/adaptive_streaming_I1_apache1')
-        .message('{{ .ID }} is {{ .Level }} Mean Requests Per Second: {{ index .Fields "mean_RPS" }}')
-        .warn(lambda: "mean_RPS" > 0.2)
-        .crit(lambda: "mean_RPS" > 0.5)
+        .id('{{ .Name }}/adaptive_streaming_I1_nginx1')
+        .message('{{ .ID }} is {{ .Level }} Mean active connections: {{ index .Fields "mean_active" }}')
+        .warn(lambda: "mean_active" > 10)
        .slack()
        .log( '/tmp/RPSLoad.log' )
 
-// Apache 2 rule
+// Nginx 2 rule
 // -------------
-var a2Data = batch
-    |query(''' SELECT mean("ReqPerSec") AS "mean_RPS" FROM "CLMCMetrics"."autogen"."apache" WHERE "ipendpoint"='adaptive_streaming_I1_apache2' ''')
+var n2Data = batch
+    |query(''' SELECT mean("active") AS "mean_active" FROM "CLMCMetrics"."autogen"."nginx" WHERE "ipendpoint"='adaptive_streaming_I1_nginx2' ''')
        .period(5s)
        .every(5s)
 
-var a2Alert = a2Data
+var n2Alert = n2Data
    |alert()
-        .id('{{ .Name }}/adaptive_streaming_I1_apache2')
-        .message('{{ .ID }} is {{ .Level }} Mean Requests Per Second: {{ index .Fields "mean_RPS" }}')
-        .warn(lambda: "mean_RPS" > 0.2)
-        .crit(lambda: "mean_RPS" > 0.5)
+        .id('{{ .Name }}/adaptive_streaming_I1_nginx2')
+        .message('{{ .ID }} is {{ .Level }} Mean active connections: {{ index .Fields "mean_active" }}')
+        .warn(lambda: "mean_active" > 10)
        .slack()
        .log( '/tmp/RPSLoad.log' )
 ```
 
-Alerts are sent both an internal logging within the CLMC service file system and also to a FLAME demo Slack service:
+Alerts are sent both to an internal log within the CLMC service file system and to a FLAME demo Slack service:
 
 https://flamedemo-itinnov.slack.com
 
 Alerts can be found under the '#clmc' channel.
 
@@ -136,5 +144,3 @@ Alerts can be found under the '#clmc' channel.
 ### Kapacitor rules in Chronograf's GUI
 
 Additional rules can be added to this demonstrator either via the Chronograf GUI (see [here](https://docs.influxdata.com/chronograf/v1.4/introduction/getting-started/#4-connect-chronograf-to-kapacitor) for more information) or by using the Kapacitor HTTP API and TICKscript (for an introduction, [look here](https://docs.influxdata.com/kapacitor/v1.4/tick/)).
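As an aside to the manual Data Explorer steps above, the same aggregation can be issued programmatically with the influxdb Python client already used by the test fixtures. The following is a minimal sketch, not part of this change set, assuming it is run on the clmc-service VM where InfluxDB listens on localhost:8086 (the address the removed get_db_client fixture used):

```python
#!/usr/bin/python3

from influxdb import InfluxDBClient

# CLMC metrics database; localhost:8086 is only valid when run on the clmc-service VM
client = InfluxDBClient(host="localhost", port=8086, database="CLMCMetrics", timeout=10)

# the same aggregation built by hand in the Chronograf Data Explorer above
query = 'SELECT mean("requests") AS "mean_requests" FROM "CLMCMetrics"."autogen"."nginx" WHERE time > now() - 1h GROUP BY time(10s)'

# get_points() yields one dict per 10s bucket, keyed by the query aliases
for point in client.query(query).get_points():
    print(point["time"], point["mean_requests"])
```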
- - diff --git a/test/streaming/run.sh b/test/streaming/run.sh index 359204945167465d1920d88d6dc84930787fd9f1..81c7d5f6aba81658bf416e8decd9d62d7b96f6a1 100755 --- a/test/streaming/run.sh +++ b/test/streaming/run.sh @@ -48,7 +48,7 @@ COUNTER=0 MAX_CLIENTS=$3 while [ $COUNTER -lt $MAX_CLIENTS ]; do # run cvlc headless, redirect stderr into stdout, pipe that into the report.sh script - cvlc -Vdummy --no-audio $STREAM_URI 2>&1 | /home/ubuntu/flame-clmc/test/streaming/report.sh ${COUNTER} & + cvlc -Vdummy --no-audio $STREAM_URI 2>&1 | /vagrant/test/streaming/report.sh ${COUNTER} & sleep 1 let COUNTER=COUNTER+1 done diff --git a/test/streaming/setup.sh b/test/streaming/setupCLMC.sh old mode 100755 new mode 100644 similarity index 80% rename from test/streaming/setup.sh rename to test/streaming/setupCLMC.sh index 39b958a2c2c0113169b99d451db632244b739b0d..6d2bd38390aca17ad2ad89a2debb6d5f89eab794 --- a/test/streaming/setup.sh +++ b/test/streaming/setupCLMC.sh @@ -48,21 +48,3 @@ curl -i -X POST -H "Content-Type: application/json" http://localhost:9092/kapaci # Set up dashboard curl -i -X POST -H "Content-Type: application/json" http://localhost:8888/chronograf/v1/dashboards -d @$TEST_DIR/dashboard.json - -# Copy test data -# Apache -# DEST_DIR="/var/www/html/" - -# NGINX -DEST_DIR="/usr/share/nginx/html" - -TEST_VIDEO="20180212104221flame-project-full.mp4" -TEST_VIDEO_ARCHIVE=$TEST_VIDEO".gz" -DEST_FILE=$DEST_DIR"/"$TEST_VIDEO_ARCHIVE - -# Copy files for MPEG-DASH testing -curl "ftp://ftp.it-innovation.soton.ac.uk/testdata/video/"$TEST_VIDEO_ARCHIVE --user flame-rw:DR8ngj3ogSjd8gl -o $DEST_FILE -tar -xvf $DEST_FILE -C $DEST_DIR - -rm -rf $DEST_FILE -mv $DEST_DIR"/"$TEST_VIDEO $DEST_DIR"/"test_video \ No newline at end of file diff --git a/test/streaming/setupNGINX.sh b/test/streaming/setupNGINX.sh new file mode 100644 index 0000000000000000000000000000000000000000..3833350c7e2a157538c5014d12195627c5aaf538 --- /dev/null +++ b/test/streaming/setupNGINX.sh @@ -0,0 +1,41 @@ +#!/bin/bash +#///////////////////////////////////////////////////////////////////////// +#// +#// (c) University of Southampton IT Innovation Centre, 2018 +#// +#// Copyright in this software belongs to University of Southampton +#// IT Innovation Centre of Gamma House, Enterprise Road, +#// Chilworth Science Park, Southampton, SO16 7NS, UK. +#// +#// This software may not be used, sold, licensed, transferred, copied +#// or reproduced in whole or in part in any manner or form or in or +#// on any media by any person other than in accordance with the terms +#// of the Licence Agreement supplied with the software, or otherwise +#// without the prior written consent of the copyright owners. +#// +#// This software is distributed WITHOUT ANY WARRANTY, without even the +#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +#// PURPOSE, except where stated in the Licence Agreement supplied with +#// the software. 
+#// +#// Created By : Simon Crowle +#// Created Date : 14/02/2018 +#// Created for Project : FLAME +#// +#///////////////////////////////////////////////////////////////////////// + +# NGINX +DEST_DIR="/usr/share/nginx/html" + +TEST_VIDEO="20180212104221flame-project-full.mp4" +TEST_VIDEO_ARCHIVE=$TEST_VIDEO".gz" +DEST_FILE=$DEST_DIR"/"$TEST_VIDEO_ARCHIVE + +echo "ftp://ftp.it-innovation.soton.ac.uk/testdata/video/"$TEST_VIDEO_ARCHIVE + +# Copy files for MPEG-DASH testing +curl "ftp://ftp.it-innovation.soton.ac.uk/testdata/video/"$TEST_VIDEO_ARCHIVE --user flame-rw:DR8ngj3ogSjd8gl -o $DEST_FILE +tar -xvf $DEST_FILE -C $DEST_DIR + +rm -rf $DEST_FILE +mv $DEST_DIR"/"$TEST_VIDEO $DEST_DIR"/"test_video diff --git a/test/streaming/test_rspec.py b/test/streaming/test_rspec.py index ea21ea9054143172ba27ab7492a4d4077d6a667d..0bbea5403b59178f609661eb2f4fd280822b5b74 100644 --- a/test/streaming/test_rspec.py +++ b/test/streaming/test_rspec.py @@ -1,19 +1,41 @@ #!/usr/bin/python3 +from subprocess import run +from platform import system import pytest -import os -def test_service_names(streaming_config): - print(streaming_config['hosts'][0]['name']) - assert streaming_config['hosts'][0]['name'] == 'clmc-service' - assert streaming_config['hosts'][1]['name'] == 'nginx1' - assert streaming_config['hosts'][2]['name'] == 'nginx2' - assert streaming_config['hosts'][3]['name'] == 'loadtest-streaming' + +@pytest.mark.parametrize("service_name", [ + 'clmc-service', + 'nginx1', + 'nginx2', + 'loadtest-streaming' +]) +def test_service_names(streaming_config, service_name): + """ + Tests the service names in the configuration. + + :param streaming_config: the configuration fixture collected from conftest.py + :param service_name the service name to test + """ + + assert any(s['name'] == service_name for s in streaming_config['hosts']), "{0} not in list of hosts".format(service_name) + print("\nSuccessfully passed configuration test for service name {0}\n".format(service_name)) + def test_ping(streaming_config): - """This test will only run on linux""" - for x in streaming_config['hosts']: - print(x['ip_address']) - response = os.system("ping -c 1 " + x['ip_address']) - assert response == 0 - + """ + Pings each service to test for liveliness + + :param streaming_config: the configuration fixture collected from conftest.py + """ + + print("\n") # blank line printed for formatting purposes + + ping_count = 1 + system_dependent_param = "-n" if system().lower() == "windows" else "-c" + + for service in streaming_config['hosts']: + command = ["ping", system_dependent_param, str(ping_count), service['ip_address']] + assert run(command).returncode == 0, "Service ping test failed for {0} with ip address {1}".format(service['name'], service['ip_address']) + print("\nSuccessfully passed ping test for service: {0}\n".format(service['name'])) diff --git a/test/streaming/test_streaming.py b/test/streaming/test_streaming.py new file mode 100644 index 0000000000000000000000000000000000000000..a35ef3760200a0b3b35d878bb492dcf6e66b4fbf --- /dev/null +++ b/test/streaming/test_streaming.py @@ -0,0 +1,139 @@ +#!/usr/bin/python3 + +from threading import Thread +from time import sleep +from queue import Queue +from xml.etree import ElementTree +from urllib.parse import urljoin +from os.path import isfile +from os import remove, system +import pytest +import requests + + +class TestStreamingAlerts(object): + """ + A testing class used to group all the tests related to the streaming scenario. 
+ """ + + @pytest.mark.parametrize("log", ["/tmp/RPSLoad.log"]) + def test_alerts(self, log, streaming_url, streaming_manifest): + """ + This test case generates some streaming requests to the server to ensure an alert is triggered and then tests the log file for this alert. Different logs can be tested by + appending to the list of parameters in the pytest decorator + + :param log: the path of the log file that is under test + :param streaming_url: the fixture providing the streaming url for this test case + :param streaming_manifest: the fixture providing the root of the XML streaming manifest + """ + + try: + if isfile(log): + remove(log) # delete log file if existing from previous tests + except PermissionError: + system("sudo rm {0}".format(log)) # handles the case for running on linux where permission will be required to delete the old log file + + segments = streaming_manifest.findall(".//{urn:mpeg:DASH:schema:MPD:2011}SegmentURL") + + threads_num = 30 + threads_queue = Queue(maxsize=threads_num) # a synchronized queue is used to track if all the threads has finished execution + threads = [StreamingThread(streaming_url, segments, threads_queue) for _ in range(threads_num)] + for t in threads: + t.start() + + alert_created = False + while True: + # loop while threads are execution and do a check every 2.5 seconds to check if either alert log has been created or threads have finished execution + sleep(2.5) + if isfile(log): + for t in threads: # kill all running threads in case log file is created beforehand + t.stop() + alert_created = True + + if threads_queue.full(): + break + + assert alert_created, "Alerts test failed: no log file is created indicating a triggered alert." + + print("\nSuccessfully passed alert creation test.\n") + + @staticmethod + @pytest.fixture(scope="class", params=[{"server": "http://192.168.50.11", "video": "/test_video/stream.mpd"}]) + def streaming_url(request): + """ + A fixture with class scope - used only in the scope of the testing class. + + :param request: the parameters for this fixture - server url and video relative url + :return: the combined URL for the video used for streaming + """ + + return urljoin(request.param["server"], request.param["video"]) + + @staticmethod + @pytest.fixture(scope="class") + def streaming_manifest(streaming_url): + """ + A fixture to download the manifest file for the streamed video and parse the downloaded XML content + + :param streaming_url: the fixture which provides the streaming url + :return: an XML root node object + """ + + manifest_xml = requests.get(streaming_url).text + root = ElementTree.fromstring(manifest_xml) + return root + + +class StreamingThread(Thread): + + def __init__(self, url, segments, queue): + """ + Subclassing the Thread class to create a custom streaming thread. + + :param url: the streaming url + :param segments: the list of SegmentURL XML nodes + :param queue: an auxiliary parameter used to indicate when this thread has finished execution + """ + + super(StreamingThread, self).__init__() + self.running = False + self.url = url + self.segments = segments + self.queue = queue + self._test_finished = False # a flag to indicate whether the thread should stop running + + def stop(self): + """ + Kill this thread and suspend its execution. + """ + + self._test_finished = True + + def run(self): + """ + A function, which simulates an actual streaming by downloading different audio/video segments from the server using a request session, + which leaves the connection open until executing. 
+ """ + + size = len(self.segments) + size = size if size % 2 == 0 else size - 1 + + s = requests.session() + + for i in range(0, int(size / 2), 1): + segment_audio = self.segments[0] + segment_video = self.segments[int(size / 2) + i] + segment_audio_url = segment_audio.attrib.get('media') + segment_video_url = segment_video.attrib.get('media') + + s.get(urljoin(self.url, segment_audio_url)) + s.get(urljoin(self.url, segment_video_url)) + + # check if thread is killed in case the test has already succeeded + if self._test_finished: + break + + # a small time out to mimic the behaviour of a real streaming + sleep(2.5) + + self.queue.put(True) diff --git a/test/telegraf-agents/conftest.py b/test/telegraf-agents/conftest.py index fedd6f9292b5a7d3061f336eabbd9f8fb2b614e0..b096dd4d6875bed42e3c2842148c7b2f6db7a32d 100644 --- a/test/telegraf-agents/conftest.py +++ b/test/telegraf-agents/conftest.py @@ -4,13 +4,29 @@ import pytest import yaml from influxdb import InfluxDBClient -@pytest.fixture(scope="module") -def telegraf_agent_config(): - """Returns the service configuration deployed for the telegraf conf tests.""" - with open("test/telegraf-agents/rspec.yml", 'r') as stream: + +@pytest.fixture(scope="module", params=[{'config': {'rspec': 'test/telegraf-agents/rspec.yml'}}]) +def telegraf_agent_config(request): + """ + Reads the service configuration deployed for the streaming simulation test. + + :param request: access the parameters of the fixture + :return: the python object representing the read YAML file + """ + + with open(request.param['config']['rspec'], 'r') as stream: data_loaded = yaml.load(stream) return data_loaded + @pytest.fixture(params=[{'database': 'CLMCMetrics'}], scope='module') def influxdb(telegraf_agent_config, request): - return InfluxDBClient(telegraf_agent_config['hosts'][0]['ip_address'], 8086, request.param['database']) \ No newline at end of file + """ + Creates an Influx DB client for the CLMC metrics database + + :param telegraf_agent_config: the fixture returning the yaml configuration + :param request: access the parameters of the fixture + :return: the created Influx DB client + """ + + return InfluxDBClient(host=telegraf_agent_config['hosts'][0]['ip_address'], port=8086, database=request.param['database'], timeout=10) diff --git a/test/telegraf-agents/test_rspec.py b/test/telegraf-agents/test_rspec.py new file mode 100644 index 0000000000000000000000000000000000000000..7a7c7ee441210077edbd4d455427ce0d47e152a1 --- /dev/null +++ b/test/telegraf-agents/test_rspec.py @@ -0,0 +1,43 @@ +#!/usr/bin/python3 + +from subprocess import run +from platform import system +import pytest + + +@pytest.mark.parametrize("service_name", [ + 'clmc-service', + 'apache', + 'nginx', + 'mongo', + 'ffmpeg', + 'ipendpoint' +]) +def test_service_name(telegraf_agent_config, service_name): + """ + Tests the service names in the configuration. 
+ + :param telegraf_agent_config: the configuration fixture collected from conftest.py + :param service_name the service name to test + """ + + assert any(s['name'] == service_name for s in telegraf_agent_config['hosts']), "{0} not in list of hosts".format(service_name) + print("\nSuccessfully passed configuration test for service name {0}\n".format(service_name)) + + +def test_ping(telegraf_agent_config): + """ + Pings each service to test for liveliness + + :param telegraf_agent_config: the configuration fixture collected from conftest.py + """ + + print("\n") # blank line printed for formatting purposes + + ping_count = 1 + system_dependent_param = "-n" if system().lower() == "windows" else "-c" + + for service in telegraf_agent_config['hosts']: + command = ["ping", system_dependent_param, str(ping_count), service['ip_address']] + assert run(command).returncode == 0, "Service ping test failed for {0} with ip address {1}".format(service['name'], service['ip_address']) + print("\nSuccessfully passed ping test for service: {0}\n".format(service['name'])) diff --git a/test/telegraf-agents/test_telegraf_agents.py b/test/telegraf-agents/test_telegraf_agents.py index e7602723b2ae818ef4423f17c1959982edf4aed2..b55f46a515940c7d6ab109fb7ea4caba7f0ff3ee 100644 --- a/test/telegraf-agents/test_telegraf_agents.py +++ b/test/telegraf-agents/test_telegraf_agents.py @@ -1,5 +1,5 @@ #!/usr/bin/python3 -import os + import pytest from influxdb import InfluxDBClient @@ -30,16 +30,24 @@ def test_ping(telegraf_agent_config): ('mem', 'SELECT mean("free") AS "mean" FROM "CLMCMetrics"."autogen"."mem"', 0) ]) def test_all_inputs(influxdb, measurement, query, expected_result): - """Tests measurements are received from an input plugin aggregated across all services """ + Tests measurements are received from an input plugin aggregated across all services + + :param influxdb: the influx db client fixture + :param measurement: the measurement to test + :param query: the query to execute + :param expected_result: the expected result from the query + """ + query_result = influxdb.query('SHOW measurements ON "CLMCMetrics"') points = list(query_result.get_points()) assert any(p['name'] == measurement for p in points), "{0} not in measurement list".format(measurement) query_result = influxdb.query(query) - points = list(query_result.get_points()) - actual_result = points[0]['mean'] - assert actual_result > expected_result, "actual result {0} is not > expected result {1} for query {2}".format(actual_result, str(expected_result), query) + points = next(query_result.get_points()) # get_points() returns a generator, to take the first element we can use the next() function + actual_result = points['mean'] + assert actual_result > expected_result, "actual result {0} is not > expected result {1} for query {2}".format(actual_result, str(expected_result), query) + @pytest.mark.parametrize("query, expected_result", [('filter query', 0), @@ -51,5 +59,4 @@ def test_global_tag_filtering(influxdb, query, expected_result): """ # run query # check result - assert 1 - + assert 1
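The final hunk leaves `test_global_tag_filtering` as a stub (placeholder queries and `assert 1`). One possible shape for it, once real filter queries are agreed, is sketched below; the `mem`/`free` filter and the `ipendpoint` tag value are illustrative assumptions and are not part of this change set:

```python
#!/usr/bin/python3

import pytest


@pytest.mark.parametrize("query", [
    # hypothetical filter query - the ipendpoint tag value must match the deployed rspec.yml
    'SELECT count("free") AS "count" FROM "CLMCMetrics"."autogen"."mem" WHERE "ipendpoint"=\'adaptive_streaming_I1_apache1\'',
])
def test_global_tag_filtering(influxdb, query):
    """
    Checks that filtering on a global tag still returns data, i.e. every agent reports the tag.

    :param influxdb: the influx db client fixture from conftest.py
    :param query: the filter query to execute
    """

    points = list(influxdb.query(query).get_points())

    assert len(points) > 0, "No data returned for filter query: {0}".format(query)
    assert points[0]["count"] > 0, "Global tag filtering returned no measurements for query: {0}".format(query)
```

Whether such a test should assert on a fixed count or simply on non-empty results depends on how the rspec fixture pins the ipendpoint values, so this sketch only checks that tagged data comes back.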