Skip to content
Snippets Groups Projects
Commit 28e4e378 authored by MJB's avatar MJB
Browse files

Conflicts resolved

parents df5a8c3b 2454e9e9
Branches
Tags
No related merge requests found
Showing
with 1061 additions and 35 deletions
[run]
source = CLMCservice
omit = CLMCservice/tests.py
...@@ -7,4 +7,9 @@ ...@@ -7,4 +7,9 @@
*_version.py* *_version.py*
*reporc *reporc
ubuntu-xenial-16.04-cloudimg-console.log ubuntu-xenial-16.04-cloudimg-console.log
**/.pytest_cache/ .idea/
\ No newline at end of file *.egg
*.pyc
.pytest_cache
.tox
*$py.class
from pyramid.config import Configurator
from pyramid.settings import asbool
from CLMCservice.views import AggregatorConfig
def main(global_config, **settings):
    """
    Builds and returns the Pyramid WSGI application for the CLMC service.

    :param global_config: the global configuration object
    :param settings: deployment settings parsed from the .ini file
    :return: a configured WSGI application
    """

    # the 'aggregator_running' setting arrives as a string from the .ini file,
    # hence it must be converted so that it is stored as a bool and not as a string
    settings['aggregator_running'] = asbool(settings.get('aggregator_running', 'false'))

    configurator = Configurator(settings=settings)

    # register the aggregator route and wire the GET/POST methods of the class-based view to it
    configurator.add_route('aggregator', '/aggregator')
    configurator.add_view(AggregatorConfig, attr='get', request_method='GET')
    configurator.add_view(AggregatorConfig, attr='post', request_method='POST')

    configurator.scan()
    return configurator.make_wsgi_app()
import pytest
from pyramid import testing
from pyramid.httpexceptions import HTTPBadRequest
class TestAggregatorConfig(object):
    """
    Pytest test suite exercising the aggregator configuration API calls.
    """

    @pytest.fixture(autouse=True)
    def app_config(self):
        """
        Per-test setUp/tearDown fixture: creates a fresh Pyramid testing
        configuration with the aggregator initially marked as not running.
        """
        self.config = testing.setUp()
        self.config.add_settings({'aggregator_running': False})

        yield

        testing.tearDown()

    def test_GET(self):
        """
        Verifies the GET endpoint reporting the aggregator status.
        """
        # nested import so that importing the class view is part of the test itself
        from CLMCservice.views import AggregatorConfig

        assert not self.config.get_settings().get('aggregator_running'), "Initially aggregator is not running."

        response = AggregatorConfig(testing.DummyRequest()).get()

        assert type(response) == dict, "Response must be a dictionary representing a JSON object."
        assert not response.get('aggregator_running'), "The response of the API call must return the aggregator status being set as False"
        assert not self.config.get_settings().get('aggregator_running'), "A GET request must not modify the aggregator status."

    @pytest.mark.parametrize("input_val, output_val", [
        ("True", True),
        ("true", True),
        ("1", True),
        ("False", False),
        ("false", False),
        ("0", False),
        ("t", None),
        ("f", None),
    ])
    def test_POST(self, input_val, output_val):
        """
        Verifies the POST endpoint mutating the aggregator status.

        :param input_val: the input form parameter
        :param output_val: the expected output value, None for expecting an Exception
        """
        # nested import so that importing the class view is part of the test itself
        from CLMCservice.views import AggregatorConfig

        assert not self.config.get_settings().get('aggregator_running'), "Initially aggregator is not running."

        request = testing.DummyRequest()
        request.params['running'] = input_val

        if output_val is None:
            # an invalid form value must be rejected with a bad-request error
            error_raised = False
            try:
                AggregatorConfig(request).post()
            except HTTPBadRequest:
                error_raised = True
            assert error_raised, "Error must be raised in case of an invalid argument."
        else:
            response = AggregatorConfig(request).post()
            assert response == {'aggregator_running': output_val}, "Response of POST request must include the new status of the aggregator"
            assert self.config.get_settings().get('aggregator_running') == output_val, "Aggregator status must be updated to running."
def str_to_bool(value):
    """
    A utility function to convert a string to boolean based on simple rules.

    Accepted true values: 'True', 'true', '1'; accepted false values: 'False', 'false', '0'.

    :param value: the value to convert
    :return: True or False
    :raises ValueError: if value is not a string, or cannot be converted to boolean
    """

    # isinstance is the idiomatic type check (also fixes the "booolean" typo in the message)
    if not isinstance(value, str):
        raise ValueError("This method only converts string to boolean.")

    if value in ('False', 'false', '0'):
        return False
    elif value in ('True', 'true', '1'):
        return True
    else:
        raise ValueError("Invalid argument for conversion")
from pyramid.view import view_defaults
from pyramid.httpexceptions import HTTPBadRequest
from CLMCservice.utilities import str_to_bool
@view_defaults(route_name='aggregator', renderer='json')
class AggregatorConfig(object):
    """
    A class-based view exposing read (GET) and write (POST) access to the
    running status of the aggregator.
    """

    def __init__(self, request):
        """
        Initialises the view instance for a single request.

        :param request: client's call request
        """
        self.request = request

    def get(self):
        """
        A GET API call for the status of the aggregator.

        :return: A JSON response with the status of the aggregator.
        """
        return {'aggregator_running': self.request.registry.settings.get('aggregator_running')}

    def post(self):
        """
        A POST API call for the status of the aggregator.

        :return: A JSON response to the POST call (success or fail).
        :raises HTTPBadRequest: if form argument cannot be converted to boolean
        """
        raw_value = self.request.params.get('running')

        try:
            parsed_status = str_to_bool(raw_value)
        except ValueError:
            raise HTTPBadRequest("Bad request parameter - expected a boolean, received {0}".format(self.request.params.get('running')))

        self.request.registry.settings['aggregator_running'] = parsed_status
        # TODO start/stop aggregator based on value of new status
        return {'aggregator_running': parsed_status}
...@@ -104,4 +104,45 @@ Then the package is installed ...@@ -104,4 +104,45 @@ Then the package is installed
Then the tests are run Then the tests are run
`vagrant --fixture=scripts -- ssh test-runner -- -tt "pytest -s --pyargs clmctest.scripts"` `sudo apt-get install python3-pip`
\ No newline at end of file
`pip3 install pytest`
#### CLMC Service
The CLMC service is implemented using the Pyramid framework and is currently under development.
Before installing the CLMC service and its dependencies, it is recommended to use a virtual environment. To manage virtual
environments, **virtualenvwrapper** can be used.
```
pip install virtualenvwrapper
```
To create a virtual environment use the **mkvirtualenv** command:
```
mkvirtualenv CLMC
```
When created, you should already be set to use the new virtual environment, but to make sure of this use the **workon** command:
```
workon CLMC
```
Now, any installed libraries will be specifically installed in this environment only. To install and use the CLMC service
locally, the easiest thing to do is to use **pip** (make sure you are in the root folder of the project - ***flame-clmc***):
```
pip install -e .
```
Finally, start the service on localhost by using pyramid's **pserve**:
```
pserve development.ini --reload
```
You should now be able to see the 'Hello world' message when visiting **http://localhost:8080** in your browser.
#!/usr/bin/python3
"""
## © University of Southampton IT Innovation Centre, 2018
##
## Copyright in this software belongs to University of Southampton
## IT Innovation Centre of Gamma House, Enterprise Road,
## Chilworth Science Park, Southampton, SO16 7NS, UK.
##
## This software may not be used, sold, licensed, transferred, copied
## or reproduced in whole or in part in any manner or form or in or
## on any media by any person other than in accordance with the terms
## of the Licence Agreement supplied with the software, or otherwise
## without the prior written consent of the copyright owners.
##
## This software is distributed WITHOUT ANY WARRANTY, without even the
## implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE, except where stated in the Licence Agreement supplied with
## the software.
##
## Created By : Nikolay Stanchev
## Created Date : 25-04-2018
## Created for Project : FLAME
"""
from influxdb import InfluxDBClient
from time import time, sleep
from urllib.parse import urlparse
from threading import Thread, Event
import clmctest.monitoring.LineProtocolGenerator as lp
class Aggregator(Thread):
    """
    A class used to perform the aggregation feature of the CLMC - aggregating network and media service measurements.
    Currently implemented as a thread, so that the implementation can be tested using pytest.
    """

    REPORT_PERIOD = 5  # currently, report period is 5s, that is every 5 seconds the mean delay values for the last 5 seconds are aggregated
    DATABASE = 'E2EMetrics'  # default database the aggregator uses
    DATABASE_URL = 'http://203.0.113.100:8086'  # default database URL the aggregator uses

    def __init__(self, database=DATABASE, database_url=DATABASE_URL):
        """
        Constructs an Aggregator instance.

        :param database: database name to use
        :param database_url: database url to use
        """

        super(Aggregator, self).__init__()  # call the constructor of the thread

        # initialise a database client using the database url and the database name
        url_object = urlparse(database_url)
        self.db_client = InfluxDBClient(host=url_object.hostname, port=url_object.port, database=database, timeout=10)

        self.db_url = database_url
        self.db_name = database

        # a stop flag event object used to handle the killing of the thread
        self._stop_flag = Event()

    def stop(self):
        """
        Requests the thread to stop; the run loop notices the flag on its next iteration.
        """

        self._stop_flag.set()

    def run(self):
        """
        Performs the functionality of the aggregator - queries data from both measurements, merges that data
        and posts it back into influx every REPORT_PERIOD seconds.
        """

        current_time = int(time())
        while not self._stop_flag.is_set():
            boundary_time = current_time - Aggregator.REPORT_PERIOD

            # influx timestamps are in nanoseconds
            boundary_time_nano = boundary_time * 1000000000
            current_time_nano = current_time * 1000000000

            # query the network delays and group them by path ID
            network_delays = {}
            result = self.db_client.query(
                'SELECT mean(delay) as "net_delay" FROM "E2EMetrics"."autogen"."network_delays" WHERE time >= {0} and time < {1} GROUP BY path, source, target'.format(
                    boundary_time_nano, current_time_nano))
            for metadata, result_points in result.items():
                # metadata is (measurement, tags)
                tags = metadata[1]
                network_delays[(tags['path'], tags['source'], tags['target'])] = next(result_points)['net_delay']

            # query the service delays and group them by endpoint, service function instance and sfr
            service_delays = {}
            result = self.db_client.query('SELECT mean(response_time) as "response_time" FROM "E2EMetrics"."autogen"."service_delays" WHERE time >= {0} and time < {1} GROUP BY endpoint, sf_instance, sfr'.format(boundary_time_nano, current_time_nano))
            for metadata, result_points in result.items():
                tags = metadata[1]
                service_delays[tags['sfr']] = (next(result_points)['response_time'], tags['endpoint'], tags['sf_instance'])

            # for each network path check if there is a media service delay report for the target sfr - if so, generate an e2e_delay measurement
            for path in network_delays:
                # check if target sfr is reported in service delays, in other words - if there is a media service instance being connected to target sfr
                path_id, source, target = path
                if target not in service_delays:
                    # if not, continue with the other network path reports
                    continue

                # BUG FIX: the original never populated source_SFR/target_SFR (always None) and checked
                # 'None not in e2e_arguments.items()', which is always true because items() yields tuples;
                # the tags are now filled in and completeness is verified against the dict's values
                e2e_arguments = {"path_ID": path_id, "source_SFR": source, "target_SFR": target, "endpoint": None,
                                 "sf_instance": None, "delay_forward": network_delays[path], "delay_reverse": None,
                                 "delay_service": None, "time": boundary_time}

                # reverse the source and target to get the network delay for the reversed path
                reversed_path = (path_id, target, source)
                assert reversed_path in network_delays  # reversed path must always be reported with the forward one - if there is network path A-B, there is also network path B-A
                e2e_arguments['delay_reverse'] = network_delays[reversed_path]

                # get the response time of the media component connected to the target SFR
                response_time, endpoint, sf_instance = service_delays[target]
                e2e_arguments['delay_service'] = response_time
                e2e_arguments['endpoint'] = endpoint
                e2e_arguments['sf_instance'] = sf_instance

                # if all the arguments of the e2e delay measurement were collected, generate and post an E2E measurement row to influx
                if None not in e2e_arguments.values():
                    self.db_client.write_points(
                        lp.generate_e2e_delay_report(e2e_arguments['path_ID'], e2e_arguments['source_SFR'], e2e_arguments['target_SFR'], e2e_arguments['endpoint'],
                                                     e2e_arguments['sf_instance'], e2e_arguments['delay_forward'], e2e_arguments['delay_reverse'], e2e_arguments['delay_service'],
                                                     e2e_arguments['time']))

            old_timestamp = current_time
            # wait until REPORT_PERIOD seconds have passed; '<' (rather than '!=') avoids an infinite
            # spin if a whole second is skipped between wake-ups
            while current_time < old_timestamp + self.REPORT_PERIOD:
                sleep(1)
                current_time = int(time())
# script entry point - starts the aggregator thread against the default database settings
if __name__ == '__main__':
    Aggregator().start()
#!/usr/bin/python3
"""
## © University of Southampton IT Innovation Centre, 2018
##
## Copyright in this software belongs to University of Southampton
## IT Innovation Centre of Gamma House, Enterprise Road,
## Chilworth Science Park, Southampton, SO16 7NS, UK.
##
## This software may not be used, sold, licensed, transferred, copied
## or reproduced in whole or in part in any manner or form or in or
## on any media by any person other than in accordance with the terms
## of the Licence Agreement supplied with the software, or otherwise
## without the prior written consent of the copyright owners.
##
## This software is distributed WITHOUT ANY WARRANTY, without even the
## implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE, except where stated in the Licence Agreement supplied with
## the software.
##
## Created By : Michael Boniface
## Created Date : 15-04-2018
## Updated By : Nikolay Stanchev
## Updated Date : 16-04-2018
## Created for Project : FLAME
"""
from influxdb import InfluxDBClient
import clmctest.monitoring.LineProtocolGenerator as lp
import urllib.parse
import time
import random
class Simulator(object):
    """
    Simulator used to generate E2E measurements.

    NOTE: downstream tests seed the random module and assert exact aggregated means,
    so the order of the random.randint/random.choice calls in run() must not change.
    """

    DATABASE = 'E2EMetrics'  # default database name
    DATABASE_URL = 'http://203.0.113.100:8086'  # default database url
    TICK = 1  # a simulation tick represents 1s
    SIMULATION_LENGTH = 120  # simulation time in seconds

    def __init__(self, database_url=DATABASE_URL, database=DATABASE):
        """
        Initialises the simulator by creating a db client object and resetting the database.

        :param database_url: db url
        :param database: db name
        """

        url_object = urllib.parse.urlparse(database_url)
        self.db_client = InfluxDBClient(host=url_object.hostname, port=url_object.port, database=database, timeout=10)

        self.db_url = database_url
        self.db_name = database

        # drop and recreate the database so every simulation starts from a clean slate
        self._reset_db()

    def _reset_db(self):
        """
        Reset the database using the already initialised db client object.
        """

        self.db_client.drop_database(self.db_name)
        self.db_client.create_database(self.db_name)

    def run(self):
        """
        Runs the simulation: writes network delay reports for both directions of the
        SR1---SR3 path and service delay reports for the media component behind SR3,
        advancing simulated time one TICK per iteration.
        """

        # all network delays start from 1ms, the dictionary stores the information to report
        paths = [
            {'target': 'SR3',
             'source': 'SR1',
             'path_id': 'SR1---SR3',
             'network_delay': 1},
            {'target': 'SR1',
             'source': 'SR3',
             'path_id': 'SR1---SR3',
             'network_delay': 1}
        ]

        # current time in seconds (to test the aggregation we write influx data points related to future time), so we start from the current time
        start_time = int(time.time())

        sim_time = start_time

        mean_delay_seconds_media = 10  # initial mean media service delay
        sample_period_net = 2  # sample period for reporting network delays (measured in seconds) - net measurements reported every 2s
        sample_period_media = 5  # sample period for reporting media service delays (measured in seconds) - service measurements reported every 5 seconds

        for i in range(0, self.SIMULATION_LENGTH):
            # measure net delay every 2 seconds for path SR1-SR3 (generates on tick 0, 2, 4, 6, 8, 10.. etc.)
            if i % sample_period_net == 0:
                path = paths[0]
                self.db_client.write_points(lp.generate_network_delay_report(path['path_id'], path['source'], path['target'], path['network_delay'], sim_time))

                # increase/decrease the delay in every sample report (min delay is 1)
                path['network_delay'] = max(1, path['network_delay'] + random.randint(-3, 3))

            # measure net delay every 2 seconds for the reverse path SR3-SR1 (generates on tick 1, 3, 5, 7, 9, 11.. etc.)
            if (i+1) % sample_period_net == 0:
                path = paths[1]
                self.db_client.write_points(lp.generate_network_delay_report(path['path_id'], path['source'], path['target'], path['network_delay'], sim_time))

                # increase/decrease the delay in every sample report (min delay is 1)
                path['network_delay'] = max(1, path['network_delay'] + random.randint(-3, 3))

            # measure service response time every 5 seconds
            if i % sample_period_media == 0:
                self.db_client.write_points(lp.generate_service_delay_report(mean_delay_seconds_media, "endpoint-1",
                                                                             "ms-A.ict-flame.eu", "SR3", sim_time))

                # increase/decrease the delay in every sample report (min delay is 10)
                mean_delay_seconds_media = max(10, mean_delay_seconds_media + random.choice([random.randint(10, 20), random.randint(-20, -10)]))

            # increase the time by one simulation tick
            sim_time += self.TICK

        end_time = sim_time
        print("Simulation finished. Start time: {0}, End time: {1}".format(start_time, end_time))
# script entry point - runs one full E2E simulation against the default database settings
if __name__ == "__main__":
    Simulator().run()
...@@ -29,6 +29,95 @@ import uuid ...@@ -29,6 +29,95 @@ import uuid
from random import randint from random import randint
def generate_e2e_delay_report(path_id, source_sfr, target_sfr, endpoint, sf_instance, delay_forward, delay_reverse, delay_service, time):
    """
    Generates a combined averaged measurement about the e2e delay and its contributing parts.

    :param path_id: the path identifier, which is a bidirectional path ID for the request and the response path
    :param source_sfr: source service router
    :param target_sfr: target service router
    :param endpoint: endpoint of the media component
    :param sf_instance: service function instance (media component)
    :param delay_forward: path delay (forward direction)
    :param delay_reverse: path delay (reverse direction)
    :param delay_service: the media service component response time
    :param time: measurement timestamp
    :return: a list of dict-formatted reports to post on influx
    """

    report = {
        "measurement": "e2e_delays",
        "tags": {
            "path_ID": path_id,
            "source_SFR": source_sfr,
            "target_SFR": target_sfr,
            "endpoint": endpoint,
            "sf_instance": sf_instance
        },
        "fields": {
            # delays are stored as floats so influx keeps a consistent field type
            "delay_forward": float(delay_forward),
            "delay_reverse": float(delay_reverse),
            "delay_service": float(delay_service)
        },
        "time": _getNSTime(time)
    }

    return [report]
def generate_network_delay_report(path_id, source_sfr, target_sfr, e2e_delay, time):
    """
    Generates a platform measurement about the network delay between two specific service routers.

    :param path_id: the identifier of the path between the two service routers
    :param source_sfr: the source service router
    :param target_sfr: the target service router
    :param e2e_delay: the e2e network delay for traversing the path between the two service routers
    :param time: the measurement timestamp
    :return: a list of dict-formatted reports to post on influx
    """

    report = {
        "measurement": "network_delays",
        "tags": {
            "path": path_id,
            "source": source_sfr,
            "target": target_sfr
        },
        "fields": {
            "delay": e2e_delay
        },
        "time": _getNSTime(time)
    }

    return [report]
def generate_service_delay_report(response_time, endpoint, sf_instance, sfr, time):
    """
    Generates a service measurement about the media service response time.

    :param response_time: the media service response time (this is not the response time for the whole round-trip, but only for the processing part of the media service component)
    :param endpoint: endpoint of the media component
    :param sf_instance: service function instance
    :param sfr: the service function router that connects the endpoint of the SF instance to the FLAME network
    :param time: the measurement timestamp
    :return: a list of dict-formatted reports to post on influx
    """

    report = {
        "measurement": "service_delays",
        "tags": {
            "endpoint": endpoint,
            "sf_instance": sf_instance,
            "sfr": sfr
        },
        "fields": {
            "response_time": response_time,
        },
        "time": _getNSTime(time)
    }

    return [report]
# Reports TX and RX, scaling on requested quality # Reports TX and RX, scaling on requested quality
def generate_network_report(recieved_bytes, sent_bytes, time): def generate_network_report(recieved_bytes, sent_bytes, time):
result = [{"measurement": "net_port_io", result = [{"measurement": "net_port_io",
...@@ -125,6 +214,7 @@ def generate_endpoint_config(time, cpu, mem, storage, current_state, current_sta ...@@ -125,6 +214,7 @@ def generate_endpoint_config(time, cpu, mem, storage, current_state, current_sta
return result return result
def generate_mc_service_config( time, mcMeasurement, current_state, current_state_time, config_state_values ): def generate_mc_service_config( time, mcMeasurement, current_state, current_state_time, config_state_values ):
""" """
generates a measurement line for a media component configuration state generates a measurement line for a media component configuration state
......
...@@ -27,6 +27,8 @@ import yaml ...@@ -27,6 +27,8 @@ import yaml
import pkg_resources import pkg_resources
from influxdb import InfluxDBClient from influxdb import InfluxDBClient
from clmctest.monitoring.StreamingSim import Sim from clmctest.monitoring.StreamingSim import Sim
from clmctest.monitoring.E2ESim import Simulator
from clmctest.monitoring.E2EAggregator import Aggregator
@pytest.fixture(scope="module") @pytest.fixture(scope="module")
...@@ -59,6 +61,12 @@ def influx_db(streaming_sim_config, request): ...@@ -59,6 +61,12 @@ def influx_db(streaming_sim_config, request):
@pytest.fixture(scope="module") @pytest.fixture(scope="module")
def simulator(streaming_sim_config): def simulator(streaming_sim_config):
"""
A fixture to obtain a simulator instance with the configuration parameters.
:param streaming_sim_config: the configuration object
:return: an instance of the simulator
"""
influx_url = "http://" + streaming_sim_config['hosts'][0]['ip_address'] + ":8086" influx_url = "http://" + streaming_sim_config['hosts'][0]['ip_address'] + ":8086"
influx_db_name = streaming_sim_config['hosts'][1]['database_name'] influx_db_name = streaming_sim_config['hosts'][1]['database_name']
...@@ -70,3 +78,31 @@ def simulator(streaming_sim_config): ...@@ -70,3 +78,31 @@ def simulator(streaming_sim_config):
simulator.reset() simulator.reset()
return simulator return simulator
@pytest.fixture(scope="module")
def e2e_simulator(streaming_sim_config):
    """
    A fixture to obtain a simulator instance with the configuration parameters.

    :param streaming_sim_config: the configuration object
    :return: an instance of the E2E simulator
    """

    db_url = "http://" + streaming_sim_config['hosts'][0]['ip_address'] + ":8086"

    return Simulator(database_url=db_url)
@pytest.fixture(scope="module")
def e2e_aggregator(streaming_sim_config):
    """
    A fixture to obtain an instance of the Aggregator class with the configuration parameters.

    :param streaming_sim_config: the configuration object
    :return: an instance of the Aggregator class
    """

    db_url = "http://" + streaming_sim_config['hosts'][0]['ip_address'] + ":8086"

    return Aggregator(database_url=db_url)
#!/usr/bin/python3
"""
## © University of Southampton IT Innovation Centre, 2018
##
## Copyright in this software belongs to University of Southampton
## IT Innovation Centre of Gamma House, Enterprise Road,
## Chilworth Science Park, Southampton, SO16 7NS, UK.
##
## This software may not be used, sold, licensed, transferred, copied
## or reproduced in whole or in part in any manner or form or in or
## on any media by any person other than in accordance with the terms
## of the Licence Agreement supplied with the software, or otherwise
## without the prior written consent of the copyright owners.
##
## This software is distributed WITHOUT ANY WARRANTY, without even the
## implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE, except where stated in the Licence Agreement supplied with
## the software.
##
## Created By : Nikolay Stanchev
## Created Date : 17-04-2018
## Created for Project : FLAME
"""
import pytest
import random
import time
class TestE2ESimulation(object):
    """
    A testing class used to group all the tests related to the E2E simulation data.

    The run_simulator fixture executes the full simulation (and the aggregator thread)
    once per test class, before any of the parametrized queries are checked.
    """

    @pytest.fixture(scope='class', autouse=True)
    def run_simulator(self, e2e_simulator, e2e_aggregator):
        """
        A fixture, which runs the simulation before running the tests.

        :param e2e_simulator: the simulator for the end-to-end data
        :param e2e_aggregator: the aggregator which merges the network and service measurements
        """
        random.seed(0)  # Seed random function so we can reliably test for average queries

        print("Starting aggregator...")
        e2e_aggregator.start()

        print("Running simulation, please wait...")
        e2e_simulator.run()

        print("Waiting for INFLUX to finish receiving simulation data...")
        time.sleep(e2e_simulator.SIMULATION_LENGTH)  # wait for data to finish arriving at the INFLUX database
        print("... simulation data fixture finished")

        # the aggregator has processed all data by now, so it can be stopped before the queries run
        print("... stopping aggregator")
        e2e_aggregator.stop()

    # the expected values below depend on random.seed(0) and on the simulator's exact
    # sequence of random calls - they will change if either is modified
    @pytest.mark.parametrize("query, expected_result", [
        ('SELECT count(*) FROM "E2EMetrics"."autogen"."network_delays"',
         {"time": "1970-01-01T00:00:00Z", "count_delay": 120}),
        ('SELECT count(*) FROM "E2EMetrics"."autogen"."service_delays"',
         {"time": "1970-01-01T00:00:00Z", "count_response_time": 24}),
        ('SELECT count(*) FROM "E2EMetrics"."autogen"."e2e_delays"',
         {"time": "1970-01-01T00:00:00Z", "count_delay_forward": 24, "count_delay_reverse": 24, "count_delay_service": 24}),
        ('SELECT mean(*) FROM "E2EMetrics"."autogen"."e2e_delays"',
         {"time": "1970-01-01T00:00:00Z", "mean_delay_forward": 13.159722222222223, "mean_delay_reverse": 3.256944444444444, "mean_delay_service": 32.791666666666664}),
    ])
    def test_simulation(self, influx_db, query, expected_result):
        """
        This is the entry point of the test. This method will be found and executed when the module is ran using pytest.

        :param query: the query to execute (value obtained from the pytest parameter decorator)
        :param expected_result: the result expected from executing the query (value obtained from the pytest parameter decorator)
        :param influx_db: the influx db client fixture - imported from conftest.py
        """

        # pytest automatically goes through all queries under test, declared in the parameters decorator

        print("\n")  # prints a blank line for formatting purposes

        # the raise_errors=False argument is given so that we could actually test that the DB didn't return any errors instead of raising an exception
        query_result = influx_db.query(query, raise_errors=False)

        # test the error attribute of the result is None, that is no error is returned from executing the DB query
        assert query_result.error is None, "An error was encountered while executing query {0}.".format(query)

        # get the dictionary of result points; the next() function just gets the first element of the query results generator (we only expect one item in the generator)
        actual_result = next(query_result.get_points())

        assert expected_result == actual_result, "E2E Simulation test failure"

        print("Successfully passed test for the following query: {0}".format(query))
...@@ -65,18 +65,30 @@ class TestSimulation(object): ...@@ -65,18 +65,30 @@ class TestSimulation(object):
('SELECT count(*) FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'', ('SELECT count(*) FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
{"time": "1970-01-01T00:00:00Z", "count_current_state_time": 3607, "count_running_mst": 3607, "count_running_sum": 3607, "count_starting_mst": 3607, "count_starting_sum": 3607, "count_stopped_mst": 3607, "count_stopped_sum": 3607, "count_stopping_mst": 3607, "count_stopping_sum": 3607}), {"time": "1970-01-01T00:00:00Z", "count_current_state_time": 3607, "count_running_mst": 3607, "count_running_sum": 3607, "count_starting_mst": 3607, "count_starting_sum": 3607, "count_stopped_mst": 3607, "count_stopped_sum": 3607, "count_stopping_mst": 3607, "count_stopping_sum": 3607}),
('SELECT mean(unplaced_mst) as "unplaced_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE unplaced_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
{"time": "1970-01-01T00:00:00Z", "unplaced_mst": 0.7}),
('SELECT mean(placing_mst) as "placing_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE placing_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'', ('SELECT mean(placing_mst) as "placing_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE placing_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
{"time": "1970-01-01T00:00:00Z", "placing_mst": 9.4}), {"time": "1970-01-01T00:00:00Z", "placing_mst": 9.4}),
('SELECT mean(placed_mst) as "placed_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE placed_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
{"time": "1970-01-01T00:00:00Z", "placed_mst": 1.7000000000000002}),
('SELECT mean(booting_mst) as "booting_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE booting_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'', ('SELECT mean(booting_mst) as "booting_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE booting_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
{"time": "1970-01-01T00:00:00Z", "booting_mst": 9.6}), {"time": "1970-01-01T00:00:00Z", "booting_mst": 9.6}),
('SELECT mean(booted_mst) as "booted_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE booted_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
{"time": "1970-01-01T00:00:00Z", "booted_mst": 2.1}),
('SELECT mean(connecting_mst) as "connecting_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE connecting_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'', ('SELECT mean(connecting_mst) as "connecting_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE connecting_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
{"time": "1970-01-01T00:00:00Z", "connecting_mst": 10.2}), {"time": "1970-01-01T00:00:00Z", "connecting_mst": 10.2}),
('SELECT mean(connected_mst) as "connected_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE connected_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'', ('SELECT mean(connected_mst) as "connected_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE connected_mst <> 0 and ipendpoint=\'endpoint1.ms-A.ict-flame.eu\'',
{"time": "1970-01-01T00:00:00Z", "connected_mst": 3605.0}), {"time": "1970-01-01T00:00:00Z", "connected_mst": 3605.0}),
('SELECT mean(unplaced_mst) as "unplaced_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE unplaced_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
{"time": "1970-01-01T00:00:00Z", "unplaced_mst": 0.7}),
('SELECT mean(placing_mst) as "placing_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE placing_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'', ('SELECT mean(placing_mst) as "placing_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE placing_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
{"time": "1970-01-01T00:00:00Z", "placing_mst": 9.4}), {"time": "1970-01-01T00:00:00Z", "placing_mst": 9.4}),
('SELECT mean(placed_mst) as "placed_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE placed_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
{"time": "1970-01-01T00:00:00Z", "placed_mst": 1.7000000000000002}),
('SELECT mean(booting_mst) as "booting_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE booting_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'', ('SELECT mean(booting_mst) as "booting_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE booting_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
{"time": "1970-01-01T00:00:00Z", "booting_mst": 9.6}), {"time": "1970-01-01T00:00:00Z", "booting_mst": 9.6}),
('SELECT mean(booted_mst) as "booted_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE booted_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
{"time": "1970-01-01T00:00:00Z", "booted_mst": 2.1}),
('SELECT mean(connecting_mst) as "connecting_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE connecting_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'', ('SELECT mean(connecting_mst) as "connecting_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE connecting_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
{"time": "1970-01-01T00:00:00Z", "connecting_mst": 10.2}), {"time": "1970-01-01T00:00:00Z", "connecting_mst": 10.2}),
('SELECT mean(connected_mst) as "connected_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE connected_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'', ('SELECT mean(connected_mst) as "connected_mst" FROM "CLMCMetrics"."autogen"."endpoint_config" WHERE connected_mst <> 0 and ipendpoint=\'endpoint2.ms-A.ict-flame.eu\'',
...@@ -99,7 +111,6 @@ class TestSimulation(object): ...@@ -99,7 +111,6 @@ class TestSimulation(object):
('SELECT mean(stopping_mst) as "stopping_mst" FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE stopping_mst <> 0', ('SELECT mean(stopping_mst) as "stopping_mst" FROM "CLMCMetrics"."autogen"."mpegdash_mc_config" WHERE stopping_mst <> 0',
{"time": "1970-01-01T00:00:00Z", "stopping_mst": 1.1}), {"time": "1970-01-01T00:00:00Z", "stopping_mst": 1.1}),
]) ])
def test_simulation(self, influx_db, query, expected_result): def test_simulation(self, influx_db, query, expected_result):
""" """
This is the entry point of the test. This method will be found and executed when the module is ran using pytest This is the entry point of the test. This method will be found and executed when the module is ran using pytest
......
###
# app configuration
# https://docs.pylonsproject.org/projects/pyramid/en/latest/narr/environment.html
###
[app:main]
use = egg:CLMCservice
pyramid.reload_templates = true
pyramid.debug_authorization = false
pyramid.debug_notfound = false
pyramid.debug_routematch = false
pyramid.default_locale_name = en
pyramid.includes = pyramid_debugtoolbar
aggregator_running = false
# By default, the toolbar only appears for clients from IP addresses
# '127.0.0.1' and '::1'.
# debugtoolbar.hosts = 127.0.0.1 ::1
###
# wsgi server configuration
###
[server:main]
use = egg:waitress#main
listen = localhost:8080
###
# logging configuration
# https://docs.pylonsproject.org/projects/pyramid/en/latest/narr/logging.html
###
[loggers]
keys = root, CLMCservice
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = INFO
handlers = console
[logger_CLMCservice]
level = DEBUG
handlers =
qualname = CLMCservice
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(asctime)s %(levelname)-5.5s [%(name)s:%(lineno)s][%(threadName)s] %(message)s
<!--
// © University of Southampton IT Innovation Centre, 2017
//
// Copyright in this software belongs to University of Southampton
// IT Innovation Centre of Gamma House, Enterprise Road,
// Chilworth Science Park, Southampton, SO16 7NS, UK.
//
// This software may not be used, sold, licensed, transferred, copied
// or reproduced in whole or in part in any manner or form or in or
// on any media by any person other than in accordance with the terms
// of the Licence Agreement supplied with the software, or otherwise
// without the prior written consent of the copyright owners.
//
// This software is distributed WITHOUT ANY WARRANTY, without even the
// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE, except where stated in the Licence Agreement supplied with
// the software.
//
// Created By : Nikolay Stanchev
// Created Date : 27-04-2018
// Created for Project : FLAME
-->
## **Flame CLMC - Network and Media Service measurements aggregation**
### **Idea**
The idea is to aggregate platform measurement points with media service measurement points and obtain a third measurement from which we can easily
understand both end-to-end and round-trip performance of a media service. This is achieved by having a python script running in the background and aggregating
the data from both measurements on a given sample period, e.g. every 10 seconds. The script then posts the aggregated data back to Influx in a new measurement.
### **Assumptions**
* Network measurement - assumption is that we have a measurement for the network link delays, called **network_delays**, providing the following information:
| path (tag) | delay | time |
| --- | --- | --- |
| path identifier | e2e delay for the given path | time of measurement |
Here, the **path** tag value is the identifier of the path between two nodes in the network topology obtained from FLIPS. The assumption is that those identifiers
will be structured in such a way that we can obtain the source and target endpoint IDs from the path identifier itself. For example:
**endpoint1.ms-A.ict-flame.eu---endpoint2.ms-A.ict-flame.eu**
We can easily split the string on **'---'** and, thus, find the source endpoint is **endpoint1.ms-A.ict-flame.eu**, while the target endpoint is
**endpoint2.ms-A.ict-flame.eu**.
The delay field value is the network end-to-end delay in milliseconds for the path identified in the tag value.
* A response will traverse the same network path as the request, but in reverse direction.
* Media service measurement - assumption is that we have a measurement for media services' response time, called **service_delays**, providing the following information:
| FQDN (tag) | sf_instance (tag) | endpoint (tag) | response_time | time |
| --- | --- | --- | --- | --- |
| media service FQDN | ID of the service function instance | endpoint identifier | response time for the media service (s) | time of measurement |
Here, the **FQDN**, **sf_instance** and **endpoint** tag values identify a unique response time measurement. The response time field value is the
response time (measured in seconds) for the media service only, and it does not take into account any of the network measurements.
### **Goal**
The ultimate goal is to populate a new measurement, called **e2e_delays**, which will be provided with the following information:
| pathID_F (tag) | pathID_R (tag) | FQDN (tag) | sf_instance (tag) | D_path_F | D_path_R | D_service | time |
| --- | --- | --- | --- | --- | --- | --- | --- |
* *pathID_F* - tag used to identify the path in forward direction, e.g. **endpoint1.ms-A.ict-flame.eu---endpoint2.ms-A.ict-flame.eu**
* *pathID_R* - tag used to identify the path in reverse direction, e.g. **endpoint2.ms-A.ict-flame.eu---endpoint1.ms-A.ict-flame.eu**
* *FQDN* - tag used to identify the media service
* *sf_instance* - tag used to identify the media service instance
* *D_path_F* - network delay for path in forward direction
* *D_path_R* - network delay for path in reverse direction
* *D_service* - media service response time
Then we can easily query on this measurement to obtain different performance indicators, such as end-to-end overall delays,
round-trip response time or any of the contributing parts in those performance indicators.
### **Aggregation script**
What the aggregation script does is very similar to the functionality of a continuous query. Given a sample report period, e.g. 10s,
the script executes at every 10-second-period querying the averaged data for the last 10 seconds. The executed queries are:
* Network delays query - to obtain the network delay values and group them by their **path** identifier:
```
SELECT mean(delay) as "Dnet" FROM "E2EMetrics"."autogen".network_delays WHERE time >= now() - 10s and time < now() GROUP BY path
```
* Media service response time query - to obtain the response time values of the media service instances and group them by **FQDN**, **sf_instance** and **endpoint** identifiers:
```
SELECT mean(response_time) as "Dresponse" FROM "E2EMetrics"."autogen".service_delays WHERE time >= now() - 10s and time < now() GROUP BY FQDN, sf_instance, endpoint
```
The results of the queries are then matched against each other on endpoint ID: on every match of the **endpoint** tag of the **service_delays** measurement with
the target endpoint ID of the **network_delays** measurement, the rows are combined to obtain an **e2e_delays** measurement row, which is posted back to influx.
Example:
* Result from first query:
```
name: network_delays
tags: path=endpoint1.ms-A.ict-flame.eu---endpoint2.ms-A.ict-flame.eu
time Dnet
---- ----
1524833145975682287 9.2
name: network_delays
tags: path=endpoint2.ms-A.ict-flame.eu---endpoint1.ms-A.ict-flame.eu
time Dnet
---- ----
1524833145975682287 10.3
```
* Result from second query
```
name: service_delays
tags: FQDN=ms-A.ict-flame.eu, endpoint=endpoint2.ms-A.ict-flame.eu, sf_instance=test-sf-clmc-agent-build_INSTANCE
time Dresponse
---- ---------
1524833145975682287 11
```
The script will parse the path identifier **endpoint1.ms-A.ict-flame.eu---endpoint2.ms-A.ict-flame.eu** and find the target endpoint being
**endpoint2.ms-A.ict-flame.eu**. Then the script checks if there is service delay measurement row matching this endpoint. Since there is one,
those values will be merged, so the result will be a row like this:
| pathID_F (tag) | pathID_R (tag) | FQDN (tag) | sf_instance (tag) | D_path_F | D_path_R | D_service | time |
| --- | --- | --- | --- | --- | --- | --- | --- |
| endpoint1.ms-A.ict-flame.eu---endpoint2.ms-A.ict-flame.eu | endpoint2.ms-A.ict-flame.eu---endpoint1.ms-A.ict-flame.eu | ms-A.ict-flame.eu | test-sf-clmc-agent-build_INSTANCE | 9.2 | 10.3 | 11 | 1524833145975682287 |
Here, another assumption is made that we can reverse the path identifier of a network delay row and that the reverse path delay would also
be reported in the **network_delays** measurement.
The resulting row would then be posted back to influx in the **e2e_delays** measurement.
### **Reasons why we cannot simply use a continuous query to do the job of the script**
* Influx is very limited in merging measurements functionality. When doing a **select into** from multiple measurements, e.g.
*SELECT * INTO measurement0 FROM measurement1, measurement2*
influx will try to merge the data on matching time stamps and tag values (if there are any tags). If the two measurements
differ in tags, then we get rows with missing data.
* When doing a continuous query, we cannot perform any kind of manipulations on the data, which disables us on choosing which
rows to merge together.
* Continuous queries were not meant to be used for merging measurements. The main use case the developers provide is for
downsampling the data in one measurement.
\ No newline at end of file
...@@ -382,19 +382,22 @@ From the whole sample period (1s), the VM has been 0.9s in state 'placed'. Hence ...@@ -382,19 +382,22 @@ From the whole sample period (1s), the VM has been 0.9s in state 'placed'. Hence
the VM has been reported to be in state __placing__. Since it has exited state __placing__, the total time spent in this state (9.3s + 0.1s = 9.4s) is reported. the VM has been reported to be in state __placing__. Since it has exited state __placing__, the total time spent in this state (9.3s + 0.1s = 9.4s) is reported.
This includes the state time from previous reports. The mean state time value for __placing__ is the same as the sum value because the VM has only been once in this state. This includes the state time from previous reports. The mean state time value for __placing__ is the same as the sum value because the VM has only been once in this state.
| global tags | current_state (tag) | current_state_time | unplaced_sum | unplaced_mst | placing_sum | placing_mst | placed_sum | placed_mst | booting_sum | booting_mst | booted_sum | booted_mst | connecting_sum | connecting_mst | connected_sum | connected_mst | time | | global tags | current_state (tag) | current_state_time | unplaced_sum | unplaced_mst | placing_sum | placing_mst | placed_sum | placed_mst | ... | time |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| ... | placing | 0.3 | 0.7 | 0.7 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ... | | ... | placing | 0.3 | 0.7 | 0.7 | 0 | 0 | 0 | 0 | 0 | ... |
| ... | placing | 1.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ... | | ... | placing | 1.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ... |
| ... | placing | 2.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ... | | ... | placing | 2.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ... |
| ... | placing | 3.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ... | | ... | placing | 3.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ... |
| ... | placing | 4.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ... | | ... | placing | 4.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ... |
| ... | placing | 5.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ... | | ... | placing | 5.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ... |
| ... | placing | 6.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ... | | ... | placing | 6.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ... |
| ... | placing | 7.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ... | | ... | placing | 7.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ... |
| ... | placing | 8.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ... | | ... | placing | 8.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ... |
| ... | placing | 9.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ... | | ... | placing | 9.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ... |
| ... | placed | 0.9 | 0 | 0 | 9.4 | 9.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ... | | ... | placed | 0.9 | 0 | 0 | 9.4 | 9.4 | 0 | 0 | 0 | ... |
In the table above, the state fields __booting_sum__, __booting_mst__, __booted_sum__, __booted_mst__, __connecting_sum__, __connecting_mst__, __connected_sum__ and __connected_mst__
were truncated, since these are always reported to be 0 and are not the states being monitored in the measurements row.
##### Media component configuration state model ##### Media component configuration state model
...@@ -418,6 +421,39 @@ An example (based on the figure above) of some measurement rows for a media comp ...@@ -418,6 +421,39 @@ An example (based on the figure above) of some measurement rows for a media comp
### Example endpoint state configuration queries ### Example endpoint state configuration queries
The following queries illustrate how to calculate _mean time between failures_ (MTBF) and _mean down time_ (MDT) for a specific endpoint.
_Q. What is the Mean Time Between Failures (MTBF) of endpoint 'adaptive_streaming_I1_apache1'?_
```
select mean(connected_mst) as "apache1_MTBF(s)" from "endpoint_config" where connected_mst <> 0 and ipendpoint = 'adaptive_streaming_I1_apache1'
```
```
name: endpoint_config
time apache1_MTBF(s)
---- ----------------
0 3605
```
_Q. What is the Mean Down Time (MDT) of endpoint 'adaptive_streaming_I1_apache1'?_
```
select mean(unplaced_mst) as "unplaced_mdt" into "endpoint_config_mdt" from "endpoint_config" where unplaced_mst <> 0 and ipendpoint = 'adaptive_streaming_I1_apache1'
select mean(placing_mst) as "placing_mdt" into "endpoint_config_mdt" from "endpoint_config" where placing_mst <> 0 and ipendpoint = 'adaptive_streaming_I1_apache1'
select mean(placed_mst) as "placed_mdt" into "endpoint_config_mdt" from "endpoint_config" where placed_mst <> 0 and ipendpoint = 'adaptive_streaming_I1_apache1'
select mean(booting_mst) as "booting_mdt" into "endpoint_config_mdt" from "endpoint_config" where booting_mst <> 0 and ipendpoint = 'adaptive_streaming_I1_apache1'
select mean(booted_mst) as "booted_mdt" into "endpoint_config_mdt" from "endpoint_config" where booted_mst <> 0 and ipendpoint = 'adaptive_streaming_I1_apache1'
select mean(connecting_mst) as "connecting_mdt" into "endpoint_config_mdt" from "endpoint_config" where connecting_mst <> 0 and ipendpoint = 'adaptive_streaming_I1_apache1'
select (unplaced_mdt + placing_mdt + placed_mdt + booting_mdt + booted_mdt + connecting_mdt) as "MDT(s)" from "endpoint_config_mdt"
```
```
name: endpoint_config_mdt
time MDT(s)
---- ------
0 33.7
```
### Example media component state configuration queries ### Example media component state configuration queries
......
###
# app configuration
# https://docs.pylonsproject.org/projects/pyramid/en/latest/narr/environment.html
###
[app:main]
use = egg:CLMCservice
pyramid.reload_templates = false
pyramid.debug_authorization = false
pyramid.debug_notfound = false
pyramid.debug_routematch = false
pyramid.default_locale_name = en
aggregator_running = false
###
# wsgi server configuration
###
[server:main]
use = egg:waitress#main
listen = *:8080
###
# logging configuration
# https://docs.pylonsproject.org/projects/pyramid/en/latest/narr/logging.html
###
[loggers]
keys = root, CLMCservice
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
[logger_CLMCservice]
level = WARN
handlers =
qualname = CLMCservice
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(asctime)s %(levelname)-5.5s [%(name)s:%(lineno)s][%(threadName)s] %(message)s
[pytest]
testpaths = CLMCservice
python_files = *.py
...@@ -21,18 +21,21 @@ ...@@ -21,18 +21,21 @@
// Created for Project : FLAME // Created for Project : FLAME
""" """
import os
import os.path
import subprocess
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import setup, find_packages from setuptools import setup, find_packages
def read(fname): requires = [
return open(os.path.join(os.path.dirname(__file__), fname)).read() 'plaster_pastedeploy',
'pyramid',
'pyramid_debugtoolbar',
'waitress',
'influxdb',
]
tests_require = [
'WebTest >= 1.3.1', # py3 compat
'pytest',
'pytest-cov',
]
def get_version(fname): def get_version(fname):
if os.path.isfile(fname): if os.path.isfile(fname):
...@@ -43,21 +46,29 @@ def get_version(fname): ...@@ -43,21 +46,29 @@ def get_version(fname):
return git_revision return git_revision
setup( setup(
name = "clmc", name = "CLMCservice",
version = get_version("clmctest/_version.py"), version = "SNAPSHOT",
author = "Michael Boniface", author = "Michael Boniface",
author_email = "mjb@it-innovation.soton.ac.uk", author_email = "mjb@it-innovation.soton.ac.uk",
description = "FLAME CLMC Test Module", description = "FLAME CLMC Testing Module",
license = "https://gitlab.it-innovation.soton.ac.uk/FLAME/flame-clmc/blob/integration/LICENSE", long_description="long description",
keywords = "FLAME CLMC", license = "license",
url='https://gitlab.it-innovation.soton.ac.uk/FLAME/flame-clmc', keywords = "FLAME CLMC service test",
packages=find_packages(exclude=["services"]), packages=find_packages(exclude=["services"]),
include_package_data=True, include_package_data=True,
package_data={'': ['_version.py', '*.yml', '*.sh', '*.json', '*.conf']}, install_requires=requires,
long_description="FLAME CLMC", extras_require={
'testing': tests_require,
},
package_data={'': ['git-commit-ref', '*.yml', '*.sh', '*.json', '*.conf']},
classifiers=[ classifiers=[
"Development Status :: Alpha", "Development Status :: Alpha",
"Topic :: FLAME Tests", "Topic :: FLAME Tests",
"License :: ", "License :: ",
], ],
entry_points={
'paste.app_factory': [
'main = CLMCservice:main',
],
},
) )
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment