diff --git a/test/services/ipendpoint/telegraf_ipendpoint.conf b/test/services/ipendpoint/telegraf_ipendpoint.conf
index 1fdd33a7b4c8eef188469cd9c94bcef746351ed4..efe72dc2d05e785c1615f1b6c55794294f630be2 100644
--- a/test/services/ipendpoint/telegraf_ipendpoint.conf
+++ b/test/services/ipendpoint/telegraf_ipendpoint.conf
@@ -1,68 +1,3 @@
-
-###############################################################################
-#                                   INPUTS                                    #
-###############################################################################
-# # Read metrics about network interface usage
- [[inputs.net]]
-# ## By default, telegraf gathers stats from any up interface (excluding loopback)
-# ## Setting interfaces will tell it to gather these explicit interfaces,
-# ## regardless of status.
-# ##
-# # interfaces = ["eth0"]
-
-# Read metrics about cpu usage
-[[inputs.cpu]]
-  ## Whether to report per-cpu stats or not
-  percpu = true
-  ## Whether to report total system cpu stats or not
-  totalcpu = true
-  ## If true, collect raw CPU time metrics.
-  collect_cpu_time = false
-  ## If true, compute and report the sum of all non-idle CPU states.
-  #report_active = false
-
-
-# Read metrics about disk usage by mount point
-[[inputs.disk]]
-  ## By default, telegraf gather stats for all mountpoints.
-  ## Setting mountpoints will restrict the stats to the specified mountpoints.
-  # mount_points = ["/"]
-  ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
-  ## present on /run, /var/run, /dev/shm or /dev).
-  ignore_fs = ["tmpfs", "devtmpfs", "devfs"]
-
-
-# Read metrics about disk IO by device
-[[inputs.diskio]]
-  ## By default, telegraf will gather stats for all devices including
-  ## disk partitions.
-  ## Setting devices will restrict the stats to the specified devices.
-  # devices = ["sda", "sdb"]
-  ## Uncomment the following line if you need disk serial numbers.
-  # skip_serial_number = false
-  #
-  ## On systems which support it, device metadata can be added in the form of
-  ## tags.
-  ## Currently only Linux is supported via udev properties. You can view
-  ## available properties for a device by running:
-  ## 'udevadm info -q property -n /dev/sda'
-  # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
-  #
-  ## Using the same metadata source as device_tags, you can also customize the
-  ## name of the device via templates.
-  ## The 'name_templates' parameter is a list of templates to try and apply to
-  ## the device. The template may contain variables in the form of '$PROPERTY' or
-  ## '${PROPERTY}'. The first template which does not contain any variables not
-  ## present for the device is used as the device name tag.
-  ## The typical use case is for LVM volumes, to get the VG/LV name instead of
-  ## the near-meaningless DM-0 name.
-  # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
-
-# Read metrics about memory usage
-[[inputs.mem]]
-  # no configuration
-
-
 # # Influx HTTP write listener
 [[inputs.http_listener]]
   ## Address and port to host HTTP listener on
diff --git a/test/streaming-sim/StreamingSim.py b/test/streaming-sim/StreamingSim.py
index 68374ff040eafa49e9da78611e20313275326022..cb66ccb72d01a783d918b73d88bdc48af9db249f 100644
--- a/test/streaming-sim/StreamingSim.py
+++ b/test/streaming-sim/StreamingSim.py
@@ -15,10 +15,10 @@ DEFAULT_REQUEST_RATE_INC_PERIOD = 10
 SIMULATION_TIME_SEC = 60 * 60
 
 # CLMC parameters
-INFLUX_DB_URL = 'http://192.168.50.10:8086'
+INFLUX_DB_URL = 'http://172.23.1.20:8086'
 INFLUX_DB_NAME = 'CLMCMetrics'
-AGENT_URL1 = 'http://192.168.50.11:8186'
-AGENT_URL2 = 'http://192.168.50.12:8186'
+AGENT1_URL = 'http://172.23.1.21:8186'
+AGENT2_URL = 'http://172.23.1.22:8186'
 
 
 class Sim(object):
@@ -26,7 +26,7 @@ class Sim(object):
     """
     Simulator for services
     """
-    def __init__(self, influx_url, influx_db_name):
+    def __init__(self, influx_url, influx_db_name, agent1_url, agent2_url):
         """
         Sets up the simulator object
 
@@ -35,6 +35,8 @@ class Sim(object):
         """
 
         self.influx_db_name = influx_db_name
+        self.agent1_url = agent1_url
+        self.agent2_url = agent2_url
 
         # influx db client is created on initialisation, which will handle the influx DB queries
         url_object = urllib.parse.urlparse(influx_url)
@@ -61,10 +63,10 @@ class Sim(object):
         # segment_size : the length of video requested at a time
         # bit_rate: MPEG-2 High 1080p 25fps = 80Mbps
 
-        ip_endpoints = [{'agent_url': AGENT_URL1, 'location': 'DC1', 'cpu': 16,
+        ip_endpoints = [{'agent_url': self.agent1_url, 'location': 'DC1', 'cpu': 16,
                          'mem': '8GB', 'storage': '1TB', 'request_queue': 0, 'request_arrival_rate': 0,
                          'segment_size': 2, 'video_bit_rate': 80, 'packet_size': 1500},
-                        {'agent_url': AGENT_URL2, 'location': 'DC2', 'cpu': 4,
+                        {'agent_url': self.agent2_url, 'location': 'DC2', 'cpu': 4,
                          'mem': '8GB', 'storage': '1TB', 'request_queue': 0, 'request_arrival_rate': 0,
                          'segment_size': 2, 'video_bit_rate': 80, 'packet_size': 1500}
                         ]
@@ -223,26 +225,34 @@ class Sim(object):
 
 
 @pytest.fixture(scope='module')
-def run_simulation_fixture():
+def run_simulation_fixture(streaming_sim_config):
     """
     A fixture, which checks if the the DB has been created, if not it runs the simulator with a 10 seconds timeout after that
     """
+    influx_db_url = "http://" + streaming_sim_config['hosts'][0]['ip_address'] + ":8086"
+    agent1_url = "http://" + streaming_sim_config['hosts'][1]['ip_address'] + ":8186"
+    agent2_url = "http://" + streaming_sim_config['hosts'][2]['ip_address'] + ":8186"
+
     global INFLUX_DB_URL
     global INFLUX_DB_NAME
     global SIMULATION_TIME_SEC
+    global AGENT1_URL
+    global AGENT2_URL
 
-    simulator = Sim(INFLUX_DB_URL, INFLUX_DB_NAME)
+    simulator = Sim(influx_db_url, INFLUX_DB_NAME, agent1_url, agent2_url)
 
     dbs = simulator.db_client.get_list_database()
     dbs = [db.get("name") for db in dbs]
 
-    if INFLUX_DB_NAME not in dbs:
-        simulator.reset()
-        simulator.run(SIMULATION_TIME_SEC)
+    # This check needed to be disabled as the CLMCMetrics database is always created when
+    # the test starts, irrespective of whether this is the 1st time or not
+#    if INFLUX_DB_NAME not in dbs:
+    simulator.reset()
+    simulator.run(SIMULATION_TIME_SEC)
 
-        print("10 seconds timeout is given so that the data could properly be inserted into the database.")
-        import time
-        time.sleep(10)
+    print("10 seconds timeout is given so that the data could properly be inserted into the database.")
+    import time
+    time.sleep(10)
 
 
 def run_simulation(generate=True):
@@ -254,8 +264,10 @@ def run_simulation(generate=True):
     global INFLUX_DB_NAME
     global INFLUX_DB_URL
     global SIMULATION_TIME_SEC
+    global AGENT1_URL
+    global AGENT2_URL
 
-    simulator = Sim(INFLUX_DB_URL, INFLUX_DB_NAME)
+    simulator = Sim(INFLUX_DB_URL, INFLUX_DB_NAME, AGENT1_URL, AGENT2_URL)
 
     if generate:
         simulator.reset()
diff --git a/test/streaming-sim/rspec.yml b/test/streaming-sim/rspec.yml
index 5709115b88f6b201e56278aa45933ef7f34ab071..399af32ec7b41dbbc94043c5cff8d9e3ea01bdf5 100644
--- a/test/streaming-sim/rspec.yml
+++ b/test/streaming-sim/rspec.yml
@@ -10,7 +10,7 @@ hosts:
         host: 8888
       - guest: 9092
         host: 9092
-    ip_address: "192.168.50.10"
+    ip_address: "203.0.113.100"
   - name: ipendpoint1
     cpus: 1
     memory: 2048
@@ -19,14 +19,14 @@ hosts:
     forward_ports:
       - guest: 80
         host: 8081
-    ip_address: "192.168.50.11"
+    ip_address: "203.0.113.101"
     location: "DC1"
     sfc_id: "MS_Template_1"
     sfc_id_instance: "MS_I1"
     sf_id: "adaptive_streaming"
     sf_id_instance: "adaptive_streaming_I1"
     ipendpoint_id: "adaptive_streaming_I1_apache1"
-    influxdb_url: "http://192.168.50.10:8086"
+    influxdb_url: "http://203.0.113.100:8086"
     database_name: "CLMCMetrics"
   - name: ipendpoint2
     cpus: 1
@@ -36,12 +36,12 @@ hosts:
    forward_ports:
       - guest: 80
         host: 8082
-    ip_address: "192.168.50.12"
+    ip_address: "203.0.113.102"
     location: "DC2"
     sfc_id: "MS_Template_1"
     sfc_id_instance: "MS_I1"
     sf_id: "adaptive_streaming"
     sf_id_instance: "adaptive_streaming_I1"
     ipendpoint_id: "adaptive_streaming_I1_apache2"
-    influxdb_url: "http://192.168.50.10:8086"
+    influxdb_url: "http://203.0.113.100:8086"
     database_name: "CLMCMetrics"
\ No newline at end of file
diff --git a/test/telegraf-agents/rspec.yml b/test/telegraf-agents/rspec.yml
index 0aef98861c3299a6788b53ad4eb641c2cd4ae6b3..e2290d4342ae999e89af4ddc8506a7a7d1ee5f48 100644
--- a/test/telegraf-agents/rspec.yml
+++ b/test/telegraf-agents/rspec.yml
@@ -79,11 +79,11 @@ hosts:
     ipendpoint_id: "metadata_database_I1_apache1"
     influxdb_url: "http://192.168.50.10:8086"
     database_name: "CLMCMetrics"
-  - name: ipendpoint
+  - name: host
     cpus: 1
     memory: 2048
     disk: "10GB"
-    service_name: "ipendpoint"
+    service_name: "host"
     forward_ports:
      - guest: 80
        host: 8085
diff --git a/test/telegraf-agents/test_telegraf_agents.py b/test/telegraf-agents/test_telegraf_agents.py
index 77b6290e1603a24e0886f59f6ceb9ba86a1a7c35..b55f46a515940c7d6ab109fb7ea4caba7f0ff3ee 100644
--- a/test/telegraf-agents/test_telegraf_agents.py
+++ b/test/telegraf-agents/test_telegraf_agents.py
@@ -1,12 +1,34 @@
 #!/usr/bin/python3
 
 import pytest
-
+import os
+from influxdb import InfluxDBClient
+
+@pytest.mark.parametrize("service_name", [
+    ('clmc-service'),
+    ('apache'),
+    ('nginx'),
+    ('mongo'),
+    ('ffmpeg'),
+    ('host'),
+    ])
+def test_service_name(telegraf_agent_config, service_name):
+    assert any(s['name'] == service_name for s in telegraf_agent_config['hosts']), "{0} not in list of hosts".format(service_name)
+
+def test_ping(telegraf_agent_config):
+    """This test will only run on Linux, as the process call is not portable; there is a better way to do this."""
+    for host in telegraf_agent_config['hosts']:
+        response = os.system("ping -c 1 " + host['ip_address'])
+        assert response == 0, "Could not ping {0} on ip address {1}".format(host['name'], host['ip_address'])
 
 @pytest.mark.parametrize("measurement, query, expected_result", [
     ('nginx', 'SELECT mean("requests") AS "mean" FROM "CLMCMetrics"."autogen"."nginx"', 0),
     ('cpu', 'SELECT mean("usage_idle") AS "mean" FROM "CLMCMetrics"."autogen"."cpu"', 0),
-    ('mongodb', 'SELECT mean("net_in_bytes") AS "mean" FROM "CLMCMetrics"."autogen"."mongodb"', 0)
+    ('mongodb', 'SELECT mean("net_in_bytes") AS "mean" FROM "CLMCMetrics"."autogen"."mongodb"', 0),
+    ('net', 'SELECT mean("bytes_sent") AS "mean" FROM "CLMCMetrics"."autogen"."net"', 0),
+    ('disk', 'SELECT mean("free") AS "mean" FROM "CLMCMetrics"."autogen"."disk"', 0),
+    ('diskio', 'SELECT mean("write_bytes") AS "mean" FROM "CLMCMetrics"."autogen"."diskio"', 0),
+    ('mem', 'SELECT mean("free") AS "mean" FROM "CLMCMetrics"."autogen"."mem"', 0)
 ])
 def test_all_inputs(influxdb, measurement, query, expected_result):
     """
@@ -28,22 +50,11 @@ def test_all_inputs(influxdb, measurement, query, expected_result):
     assert actual_result > expected_result, "actual result {0} is not > expected result {1} for query {2}".format(actual_result, str(expected_result), query)
 
 
-@pytest.mark.parametrize("ipendpoint, measurements", [
-    ('id1', [{'measurement': 'cpu', 'query': 'query', 'result': 'result'} , {'measurement': 'nginx', 'query': 'query', 'result': 'result'}, {'measurement': 'mongo', 'query': 'query', 'result': 'result'}]),
-    ('id2', [{'measurement': 'cpu', 'query': 'query', 'result': 'result'} , {'measurement': 'nginx', 'query': 'query', 'result': 'result'}])
-])
-def test_multiple_inputs_on_a_service(influxdb, ipendpoint, measurements):
-    """This test checks that a service configured with multiple input plugins as separate telegraf config files generates measurements in the database
-    """
-    # for each item in the measurement list run the query and test the result
-    assert 1
-
-
-@pytest.mark.parametrize("query, expected_result", [
-    ('filter query', 0),
-    ('filter query', 0),
-    ('filter query', 0)
-])
+@pytest.mark.parametrize("query, expected_result",
+                         [('filter query', 0),
+                          ('filter query', 0),
+                          ('filter query', 0)
+                          ])
 def test_global_tag_filtering(influxdb, query, expected_result):
     """Tests that the global tags are inserted correctly into the global configuration using the install CLMC script
     """