diff --git a/src/mediaServiceSim/LineProtocolGenerator.py b/src/mediaServiceSim/LineProtocolGenerator.py
index f57689a2a34d90871caf0a864b21c93a860bd2df..5f1c763e6b139452eb9198c864b4cb0155499334 100644
--- a/src/mediaServiceSim/LineProtocolGenerator.py
+++ b/src/mediaServiceSim/LineProtocolGenerator.py
@@ -18,7 +18,7 @@ def generate_network_report(recieved_bytes, sent_bytes, time):
     result += ' ' + str(_getNSTime(time))
 
     # Measurement
-    # print('network'+result)
+    print(result)
     return result
 
 
@@ -42,13 +42,14 @@ def generate_vm_config(state, cpu, mem, storage, time):
 
 
 # Reports cpu usage, scaling on requests
-def generate_cpu_report(cpu_useage, cpu_usage_system, time):
+def generate_cpu_report(cpu_usage, cpu_active_time, cpu_idle_time, time):
     result = 'vm_host_cpu_usage'
     # Tag
     result += ' '
     # field
-    result += 'cpu_usage='+str(cpu_useage)
-    result += ',cpu_usage_system='+str(cpu_usage_system)
+    result += 'cpu_usage='+str(cpu_usage)
+    result += ',cpu_active_time='+str(cpu_active_time)
+    result += ',cpu_idle_time='+str(cpu_idle_time)
     result += ' '
     # Time
     result += str(_getNSTime(time))
@@ -70,7 +71,7 @@ def generate_mpegdash_report(resource, requests, avg_response_time, peak_respons
     result += 'peak_response_time=' + str(peak_response_time)
     # Timestamp
     result += ' ' + str(_getNSTime(time))
-    # print(result)
+    print(result)
     return result
 
 
diff --git a/src/mediaServiceSim/simulator_v2.py b/src/mediaServiceSim/simulator_v2.py
index 9f06f9dc2ce226ff8d7956afc5cfaadf33791bc0..f9ca86ce9c4f72c0f3cbd4effa665a4fdb7f34bb 100644
--- a/src/mediaServiceSim/simulator_v2.py
+++ b/src/mediaServiceSim/simulator_v2.py
@@ -2,14 +2,20 @@ import LineProtocolGenerator as lp
 import time
 import urllib.parse
 import urllib.request
-from random import random, randint
+import sys
+import random
+
+# Simulation parameters
+TICK_TIME = 1
+DEFAULT_REQUEST_RATE_INCREMENT = 5
+SIMULATION_TIME_SEC = 10
+
+# CLMC parameters
+INFLUX_DB_URL = 'http://192.168.50.10:8086'
 
 # Simulator for services
 class sim:
     def __init__(self, influx_url):
-        # requests per second for different quality levels
-        self.quality_request_rate = {"DC1": [10, 20, 10], "DC2": [5, 30, 5]}
-
         # We don't need this as the db is CLMC metrics
         self.influx_db = 'CLMCMetrics'
         self.influx_url = influx_url
@@ -21,47 +27,100 @@ class sim:
     def run(self, simulation_length_seconds):
         start_time = time.time()
         current_time = int(time.time())
-        surrogate_services = [{'agent_url': 'http://192.168.50.11:8186', 'location': 'DC1', 'cpu': 2,
-                                'mem': '8GB', 'storage': '1TB'},
-                              {'agent_url': 'http://192.168.50.12:8186', 'location': 'DC2', 'cpu': 4, 
-                                'mem': '8GB', 'storage': '1TB'}
-                              ]
-        # Simulate surrogate services being asserted
-        for service in surrogate_services:
-            self._sendInfluxData(service['agent_url'], lp.generate_vm_config('starting', service['cpu'], service['mem'], service['storage'], current_time))
-        for service in surrogate_services:
-            self._sendInfluxData(service['agent_url'], lp.generate_vm_config('running', service['cpu'], service['mem'], service['storage'], current_time))
-
-        # Run simulation
-        for i in range(simulation_length_seconds):
-            for service in surrogate_services:
-                
-                # Scale CPU usage on number of requests, quality and cpu allocation
-                cpu_usage = self.quality_request_rate[service['location']][0]
-                cpu_usage += self.quality_request_rate[service['location']][1]*2
-                cpu_usage += self.quality_request_rate[service['location']][2]*4
-                cpu_usage = cpu_usage/service['cpu']
-                cpu_usage = cpu_usage/100 # Transform into %
-                self._sendInfluxData(service['agent_url'], lp.generate_cpu_report(                                                            cpu_usage, cpu_usage, current_time))
-                
-                # Scale SENT/REC bytes on requests and quality
-                bytes = self.quality_request_rate[service['location']][0]
-                bytes += self.quality_request_rate[service['location']][1]*2
-                bytes += self.quality_request_rate[service['location']][2]*4
-                bytes_sent = 1024*bytes
-                bytes_rec = 32*bytes
-                self._sendInfluxData(service['agent_url'], lp.generate_network_report(bytes_rec, bytes_sent, current_time))
-
-                # Scale MPEG Dash on requests, quality, cpu usage
-                avg_response_time = randint(0, 5 * self.quality_request_rate[service['location']][0])
-                avg_response_time += randint(0, 10 * self.quality_request_rate[service['location']][1])
-                avg_response_time += randint(0, 15 * self.quality_request_rate[service['location']][2])
-                avg_response_time *= cpu_usage
-                peak_response_time = avg_response_time + randint(30, 60)
-                requests = sum(self.quality_request_rate[service['location']])
-                self._sendInfluxData(service['agent_url'], lp.generate_mpegdash_report('https://Netflix.com/scream', requests, avg_response_time, peak_response_time, current_time))
-            # Add a second to the clock
-            current_time += 1000
+
+        ip_endpoints = [{'agent_url': 'http://192.168.50.11:8186', 'location': 'DC1', 'cpu': 16,
+                        'mem': '8GB', 'storage': '1TB', 'request_queue': 0, 'request_arrival_rate': 0},
+                        {'agent_url': 'http://192.168.50.12:8186', 'location': 'DC2', 'cpu': 2, 
+                        'mem': '8GB', 'storage': '1TB', 'request_queue': 0, 'request_arrival_rate': 0}
+                        ]
+
+        # Simulate configuration of the ipendpoints
+        # maps endpoint state -> [mu (ms), sigma fraction] of a normal delay distribution
+        config_delay_dist = {"placed": [5000, 0.68], "booted": [10000, 0.68],"connected": [1000, 0.68]}
+
+        # Place the endpoints
+        max_delay = 0
+        for ip_endpoint in ip_endpoints:
+            delay_time = int(random.normalvariate(config_delay_dist['placed'][0], config_delay_dist['placed'][0]*config_delay_dist['placed'][1]))
+            self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_vm_config('placed', ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], current_time+delay_time)) 
+            if delay_time > max_delay:
+                max_delay = delay_time
+
+        current_time +=max_delay
+        max_delay = 0        
+        # Boot the endpoints
+        for ip_endpoint in ip_endpoints:
+            delay_time = int(random.normalvariate(config_delay_dist['booted'][0], config_delay_dist['booted'][0]*config_delay_dist['booted'][1]))
+            self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_vm_config('booted', ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], current_time+delay_time))
+            if delay_time > max_delay:
+                max_delay = delay_time            
+
+        current_time +=max_delay
+        max_delay = 0     
+        # Connect the endpoints
+        for ip_endpoint in ip_endpoints:
+            delay_time = int(random.normalvariate(config_delay_dist['connected'][0], config_delay_dist['connected'][0]*config_delay_dist['connected'][1]))
+            self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_vm_config('connected', ip_endpoint['cpu'], ip_endpoint['mem'], ip_endpoint['storage'], current_time+delay_time))
+            if delay_time > max_delay:
+                max_delay = delay_time
+
+        current_time +=max_delay
+   
+        request_arrival_rate_inc = DEFAULT_REQUEST_RATE_INCREMENT
+        request_queue = 0
+        for i in range(simulation_length_seconds):        
+            for ip_endpoint in ip_endpoints:
+                request_processing_time = 0
+                cpu_time_available = 0
+                requests_processed = 0
+                max_requests_processed = 0
+                cpu_active_time = 0
+                cpu_idle_time = 0
+                cpu_usage = 0
+                cpu_load_time = 0
+                avg_response_time = 0
+                peak_response_time = 0
+
+                # linear increase of the request arrival rate each tick
+                ip_endpoint['request_arrival_rate'] += request_arrival_rate_inc
+                # add new requests to the queue
+                ip_endpoint['request_queue'] += ip_endpoint['request_arrival_rate']
+
+                # time to process one request (mS) in the current second
+                request_processing_time = int(random.normalvariate(50, 50*0.95))
+                if request_processing_time <= 10:
+                    request_processing_time = 10
+                # amount of cpu time (mS) per tick
+                cpu_time_available = ip_endpoint['cpu']*TICK_TIME*1000
+                max_requests_processed = cpu_time_available/request_processing_time
+                # calc how many requests processed
+                if ip_endpoint['request_queue'] <= max_requests_processed:
+                    # processed all of the requests
+                    requests_processed = ip_endpoint['request_queue']
+                else:
+                    # processed the maximum number of requests
+                    requests_processed = max_requests_processed
+                # calculate cpu usage
+                cpu_active_time = int(requests_processed*request_processing_time)
+                cpu_idle_time = int(cpu_time_available-cpu_active_time)
+                cpu_usage = cpu_active_time/cpu_time_available
+                self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_cpu_report(cpu_usage, cpu_active_time, cpu_idle_time, current_time))
+
+                # calculate network usage metrics with no constraints
+                bytes_sent = 1024*requests_processed
+                bytes_rec = 32*requests_processed
+                self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_network_report(bytes_rec, bytes_sent, current_time))                
+
+                # time to process all of the requests in the queue
+                peak_response_time = ip_endpoint['request_queue']*request_processing_time/ip_endpoint['cpu']
+                # mid-range estimate: average of peak and single-request processing time
+                avg_response_time = (peak_response_time+request_processing_time)/2
+                self._sendInfluxData(ip_endpoint['agent_url'], lp.generate_mpegdash_report('https://Netflix.com/scream', ip_endpoint['request_arrival_rate'], avg_response_time, peak_response_time, current_time))
+
+                # remove requests processed off the queue
+                ip_endpoint['request_queue'] -= int(requests_processed)            
+
+            current_time += TICK_TIME*1000
         end_time = time.time()
         print("Simulation Finished. Start time {0}. End time {1}. Total time {2}".format(start_time,end_time,end_time-start_time))
 
@@ -83,9 +142,9 @@ class sim:
         data = data.encode()
         header = {'Content-Type': 'application/octet-stream'}
         req = urllib.request.Request(url + '/write?db=' + self.influx_db, data, header)
-        urllib.request.urlopen(req)
+        urllib.request.urlopen(req)  
 
 
-simulator = sim('http://192.168.50.10:8086')
-simulator.run(180)
+simulator = sim(INFLUX_DB_URL)
+simulator.run(SIMULATION_TIME_SEC)