From f56d62ef5b6575a1ae186b01b9260c489c17f593 Mon Sep 17 00:00:00 2001
From: MJB <mjb@it-innovation.soton.ac.uk>
Date: Sun, 21 Jan 2018 17:23:29 +0000
Subject: [PATCH] fixed simulation error: timestamps were converted incorrectly
 to milliseconds (ms) while InfluxDB expects nanosecond (ns) precision, so
 queries using now() - <time> now work in Chronograf

---
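Note (illustrative, not part of the applied diff): InfluxDB line protocol timestamps default to nanosecond precision, so a Unix time in seconds must be scaled by 1e9 rather than 1e6. A minimal standalone sketch of the conversion this patch makes in _getNSTime; the helper name here is hypothetical:

    import time

    def seconds_to_ns(ts_seconds):
        # Illustrative helper (not the patched function itself): scale a Unix
        # timestamp in seconds to the nanosecond precision InfluxDB expects.
        return int(ts_seconds * 1000000000)

    now_s = time.time()
    print("seconds:     ", int(now_s))
    print("nanoseconds: ", seconds_to_ns(now_s))
    # With the old *1000000 scaling, the value interpreted as nanoseconds lands
    # in January 1970, so Chronograf queries over now() - <time> never match
    # the written points.
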
 src/mediaServiceSim/LineProtocolGenerator.py |  4 +-
 src/mediaServiceSim/simulator_v2.py          | 46 ++++++++++++--------
 2 files changed, 30 insertions(+), 20 deletions(-)

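Note (illustrative, not part of the applied diff): the simulator_v2.py change back-dates the simulated clock by SIMULATION_TIME_SEC and then advances it in seconds per tick, so every generated point falls inside the now() - <time> window shown in Chronograf. A minimal sketch of that pattern, where write_point is a hypothetical stand-in for sim._sendInfluxData:

    import time

    SIMULATION_TIME_SEC = 180   # values taken from simulator_v2.py
    TICK_TIME = 1               # seconds per simulated tick

    def write_point(line, ts_seconds):
        # Hypothetical stand-in for sim._sendInfluxData(): a real run would POST
        # the line-protocol string with an integer nanosecond timestamp appended.
        print("{0} {1}".format(line, int(ts_seconds * 1000000000)))

    # Start the simulated clock SIMULATION_TIME_SEC in the past and step it in
    # whole seconds, so the points land between now() - 3m and now().
    sim_time = time.time() - SIMULATION_TIME_SEC
    for tick in range(SIMULATION_TIME_SEC // TICK_TIME):
        write_point("cpu_usage,host=sim value=0.5", sim_time)
        sim_time += TICK_TIME
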
diff --git a/src/mediaServiceSim/LineProtocolGenerator.py b/src/mediaServiceSim/LineProtocolGenerator.py
index 5f1c763..a8ccb92 100644
--- a/src/mediaServiceSim/LineProtocolGenerator.py
+++ b/src/mediaServiceSim/LineProtocolGenerator.py
@@ -83,7 +83,9 @@ def quote_wrap(str):
 # InfluxDB likes to have time-stamps in nanoseconds
 def _getNSTime(time):
     # Convert to nano-seconds
-    return 1000000 * time
+    timestamp = int(1000000000*time)
+    print("timestamp", timestamp)
+    return timestamp
 
 # DEPRICATED
 # ____________________________________________________________________________
diff --git a/src/mediaServiceSim/simulator_v2.py b/src/mediaServiceSim/simulator_v2.py
index f9ca86c..8cf7a9e 100644
--- a/src/mediaServiceSim/simulator_v2.py
+++ b/src/mediaServiceSim/simulator_v2.py
@@ -8,7 +8,7 @@ import random
 # Simulation parameters
 TICK_TIME = 1
 DEFAULT_REQUEST_RATE_INCREMENT = 5
-SIMULATION_TIME_SEC = 10
+SIMULATION_TIME_SEC = 180
 
 # CLMC parameters
 INFLUX_DB_URL = 'http://192.168.50.10:8086'
@@ -25,8 +25,8 @@ class sim:
 
 
     def run(self, simulation_length_seconds):
-        start_time = time.time()
-        current_time = int(time.time())
+        start_time = time.time()-SIMULATION_TIME_SEC
+        sim_time = start_time
 
         ip_endpoints = [{'agent_url': 'http://192.168.50.11:8186', 'location': 'DC1', 'cpu': 16,
                         'mem': '8GB', 'storage': '1TB', 'request_queue': 0, 'request_arrival_rate': 0},
@@ -35,36 +35,44 @@ class sim:
                         ]
 
         # Simulate configuration of the ipendpoints
-        # endpoint state->mu, sigma, mS normal distribution
-        config_delay_dist = {"placed": [5000, 0.68], "booted": [10000, 0.68],"connected": [1000, 0.68]}
+        # endpoint state->mu, sigma, secs normal distribution
+        config_delay_dist = {"placed": [5, 0.68], "booted": [10, 0.68],"connected": [10, 0.68]}
 
         # Place the endpoints
         max_delay = 0
         for ip_endpoint in ip_endpoints:
-            delay_time = int(random.normalvariate(config_delay_dist['placed'][0], config_delay_dist['placed'][0]*config_delay_dist['placed'][1]))
-            self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_vm_config('placed', ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], current_time+delay_time)) 
+            delay_time = random.normalvariate(config_delay_dist['placed'][0], config_delay_dist['placed'][0]*config_delay_dist['placed'][1])
+            
+         #   print('sim_time: ', sim_time)            
+         #   print('delay_time: ', delay_time)
+         #   timestamp = sim_time+delay_time
+         #   print('timestamp: ', timestamp)
+         #   ns_time = int(timestamp*1000000)
+         #   print('ns_time: ', ns_time)
+
+            self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_vm_config('placed', ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], sim_time+delay_time)) 
             if delay_time > max_delay:
                 max_delay = delay_time
 
-        current_time +=max_delay
+        sim_time +=max_delay
         max_delay = 0        
         # Boot the endpoints
         for ip_endpoint in ip_endpoints:
-            delay_time = int(random.normalvariate(config_delay_dist['booted'][0], config_delay_dist['booted'][0]*config_delay_dist['booted'][1]))
-            self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_vm_config('booted', ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], current_time+delay_time))
+            delay_time = random.normalvariate(config_delay_dist['booted'][0], config_delay_dist['booted'][0]*config_delay_dist['booted'][1])
+            self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_vm_config('booted', ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], sim_time+delay_time))
             if delay_time > max_delay:
                 max_delay = delay_time            
 
-        current_time +=max_delay
+        sim_time +=max_delay
         max_delay = 0     
         # Connect the endpoints
         for ip_endpoint in ip_endpoints:
-            delay_time = int(random.normalvariate(config_delay_dist['connected'][0], config_delay_dist['connected'][0]*config_delay_dist['connected'][1]))
-            self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_vm_config('connected', ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], current_time+delay_time))
+            delay_time = random.normalvariate(config_delay_dist['connected'][0], config_delay_dist['connected'][0]*config_delay_dist['connected'][1])
+            self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_vm_config('connected', ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], sim_time+delay_time))
             if delay_time > max_delay:
                 max_delay = delay_time
 
-        current_time +=max_delay
+        sim_time +=max_delay
    
         request_arrival_rate_inc = DEFAULT_REQUEST_RATE_INCREMENT
         request_queue = 0
@@ -104,24 +112,24 @@ class sim:
                 cpu_active_time = int(requests_processed*request_processing_time)
                 cpu_idle_time = int(cpu_time_available-cpu_active_time)
                 cpu_usage = cpu_active_time/cpu_time_available
-                self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_cpu_report(cpu_usage, cpu_active_time, cpu_idle_time, current_time))
+                self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_cpu_report(cpu_usage, cpu_active_time, cpu_idle_time, sim_time))
 
                 # calc network usage metrics with no constraints. 
                 bytes_sent = 1024*requests_processed
                 bytes_rec = 32*requests_processed
-                self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_network_report(bytes_rec, bytes_sent, current_time))                
+                self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_network_report(bytes_rec, bytes_sent, sim_time))                
 
                 # time to process all of the requests in the queue
                 peak_response_time = ip_endpoint['request_queue']*request_processing_time/ip_endpoint['cpu']
                 # mid-range 
                 avg_response_time = (peak_response_time+request_processing_time)/2
-                self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_mpegdash_report('https://Netflix.com/scream', ip_endpoint['request_arrival_rate'], avg_response_time, peak_response_time, current_time))
+                self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_mpegdash_report('https://Netflix.com/scream', ip_endpoint['request_arrival_rate'], avg_response_time, peak_response_time, sim_time))
 
                 # remove requests processed off the queue
                 ip_endpoint['request_queue'] -= int(requests_processed)            
 
-            current_time += TICK_TIME*1000
-        end_time = time.time()
+            sim_time += TICK_TIME
+        end_time = sim_time
         print("Simulation Finished. Start time {0}. End time {1}. Total time {2}".format(start_time,end_time,end_time-start_time))
 
     def _createDB(self):
-- 
GitLab
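
A quick way to check the fix after a simulation run (a sketch, not part of the patch): query InfluxDB's HTTP /query endpoint and confirm that points appear inside the now() - 3m window Chronograf uses. The database and measurement names below are assumptions; substitute whatever _createDB() and LineProtocolGenerator.py actually use.

    import requests

    # Assumptions: InfluxDB is reachable at the simulator's INFLUX_DB_URL and the
    # database/measurement names match the real ones created by the simulator.
    INFLUX_DB_URL = 'http://192.168.50.10:8086'
    DB_NAME = 'CLMCMetrics'  # guessed name

    query = 'SELECT * FROM "cpu_usage" WHERE time > now() - 3m'
    response = requests.get(INFLUX_DB_URL + '/query',
                            params={'db': DB_NAME, 'q': query})
    print(response.json())
    # Before this patch the query returned no rows, because the millisecond-scaled
    # timestamps were interpreted as nanoseconds near the 1970 epoch.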