diff --git a/scripts/ci/locustfile.py b/scripts/ci/locustfile.py
index 7715d35cf..0868df6df 100644
--- a/scripts/ci/locustfile.py
+++ b/scripts/ci/locustfile.py
@@ -28,79 +28,79 @@ class OSRMTasks(TaskSet):
         start_coord = f"{start[1]:.6f},{start[0]:.6f}"
         end_coord = f"{end[1]:.6f},{end[0]:.6f}"
 
-        self.client.get(f"/route/v1/driving/{start_coord};{end_coord}?overview=full&steps=true")
+        self.client.get(f"/route/v1/driving/{start_coord};{end_coord}?overview=full&steps=true", name="/route/v1/driving")
 
-    @task
-    def get_table(self):
-        num_coords = random.randint(3, 250)
-        selected_coords = random.sample(self.coordinates, num_coords)
-        coords_str = ";".join([f"{coord[1]:.6f},{coord[0]:.6f}" for coord in selected_coords])
+    # @task
+    # def get_table(self):
+    #     num_coords = random.randint(3, 250)
+    #     selected_coords = random.sample(self.coordinates, num_coords)
+    #     coords_str = ";".join([f"{coord[1]:.6f},{coord[0]:.6f}" for coord in selected_coords])
 
-        self.client.get(f"/table/v1/driving/{coords_str}")
+    #     self.client.get(f"/table/v1/driving/{coords_str}")
 
-    @task
-    def get_match(self):
-        num_coords = random.randint(3, 250)
-        track_id = random.choice(self.track_ids)
-        track_coords = self.tracks[track_id][:num_coords]
-        coords_str = ";".join([f"{coord[1]:.6f},{coord[0]:.6f}" for coord in track_coords])
+    # @task
+    # def get_match(self):
+    #     num_coords = random.randint(3, 250)
+    #     track_id = random.choice(self.track_ids)
+    #     track_coords = self.tracks[track_id][:num_coords]
+    #     coords_str = ";".join([f"{coord[1]:.6f},{coord[0]:.6f}" for coord in track_coords])
 
-        self.client.get(f"/match/v1/driving/{coords_str}?steps=true")
+    #     self.client.get(f"/match/v1/driving/{coords_str}?steps=true")
 
-    @task
-    def get_nearest(self):
-        coord = random.choice(self.coordinates)
-        coord_str = f"{coord[1]:.6f},{coord[0]:.6f}"
+    # @task
+    # def get_nearest(self):
+    #     coord = random.choice(self.coordinates)
+    #     coord_str = f"{coord[1]:.6f},{coord[0]:.6f}"
 
-        self.client.get(f"/nearest/v1/driving/{coord_str}")
+    #     self.client.get(f"/nearest/v1/driving/{coord_str}")
 
-    @task
-    def get_trip(self):
-        num_coords = random.randint(2, 10)
-        selected_coords = random.sample(self.coordinates, num_coords)
-        coords_str = ";".join([f"{coord[1]:.6f},{coord[0]:.6f}" for coord in selected_coords])
+    # @task
+    # def get_trip(self):
+    #     num_coords = random.randint(2, 10)
+    #     selected_coords = random.sample(self.coordinates, num_coords)
+    #     coords_str = ";".join([f"{coord[1]:.6f},{coord[0]:.6f}" for coord in selected_coords])
 
-        self.client.get(f"/trip/v1/driving/{coords_str}?steps=true")
+    #     self.client.get(f"/trip/v1/driving/{coords_str}?steps=true")
 
 class OSRMUser(HttpUser):
     tasks = [OSRMTasks]
     wait_time = between(0.01, 0.1)
 
-    @events.quitting.add_listener
-    def _(environment, **kwargs):
-        def calculate_percentiles(stats, percentiles):
-            response_times = sorted(stats.get_response_times())
-            percentile_values = {}
-            for percentile in percentiles:
-                rank = int(len(response_times) * (percentile / 100))
-                percentile_values[percentile] = response_times[rank-1] if rank > 0 else 0
-            return percentile_values
+    # @events.quitting.add_listener
+    # def _(environment, **kwargs):
+    #     def calculate_percentiles(stats, percentiles):
+    #         response_times = sorted(stats.get_response_times())
+    #         percentile_values = {}
+    #         for percentile in percentiles:
+    #             rank = int(len(response_times) * (percentile / 100))
+    #             percentile_values[percentile] = response_times[rank-1] if rank > 0 else 0
+    #         return percentile_values
 
-        nearest_stats = environment.stats.get("/nearest/v1/driving", "GET")
-        route_stats = environment.stats.get("/route/v1/driving", "GET")
+    #     nearest_stats = environment.stats.get("/nearest/v1/driving", "GET")
+    #     route_stats = environment.stats.get("/route/v1/driving", "GET")
 
-        nearest_percentiles = calculate_percentiles(nearest_stats, [95, 99])
-        route_percentiles = calculate_percentiles(route_stats, [95, 99])
+    #     nearest_percentiles = calculate_percentiles(nearest_stats, [95, 99])
+    #     route_percentiles = calculate_percentiles(route_stats, [95, 99])
 
-        print("\nAggregated Statistics for /nearest/v1/driving:")
-        print(f"Request Count: {nearest_stats.num_requests}")
-        print(f"Failure Count: {nearest_stats.num_failures}")
-        print(f"Median Response Time: {nearest_stats.median_response_time}")
-        print(f"Average Response Time: {nearest_stats.avg_response_time}")
-        print(f"Min Response Time: {nearest_stats.min_response_time}")
-        print(f"Max Response Time: {nearest_stats.max_response_time}")
-        print(f"Average Content Size: {nearest_stats.avg_content_length}")
-        print(f"p95 Response Time: {nearest_percentiles[95]}")
-        print(f"p99 Response Time: {nearest_percentiles[99]}")
+    #     print("\nAggregated Statistics for /nearest/v1/driving:")
+    #     print(f"Request Count: {nearest_stats.num_requests}")
+    #     print(f"Failure Count: {nearest_stats.num_failures}")
+    #     print(f"Median Response Time: {nearest_stats.median_response_time}")
+    #     print(f"Average Response Time: {nearest_stats.avg_response_time}")
+    #     print(f"Min Response Time: {nearest_stats.min_response_time}")
+    #     print(f"Max Response Time: {nearest_stats.max_response_time}")
+    #     print(f"Average Content Size: {nearest_stats.avg_content_length}")
+    #     print(f"p95 Response Time: {nearest_percentiles[95]}")
+    #     print(f"p99 Response Time: {nearest_percentiles[99]}")
 
-        print("\nAggregated Statistics for /route/v1/driving:")
-        print(f"Request Count: {route_stats.num_requests}")
-        print(f"Failure Count: {route_stats.num_failures}")
-        print(f"Median Response Time: {route_stats.median_response_time}")
-        print(f"Average Response Time: {route_stats.avg_response_time}")
-        print(f"Min Response Time: {route_stats.min_response_time}")
-        print(f"Max Response Time: {route_stats.max_response_time}")
-        print(f"Average Content Size: {route_stats.avg_content_length}")
-        print(f"p95 Response Time: {route_percentiles[95]}")
-        print(f"p99 Response Time: {route_percentiles[99]}")
+    #     print("\nAggregated Statistics for /route/v1/driving:")
+    #     print(f"Request Count: {route_stats.num_requests}")
+    #     print(f"Failure Count: {route_stats.num_failures}")
+    #     print(f"Median Response Time: {route_stats.median_response_time}")
+    #     print(f"Average Response Time: {route_stats.avg_response_time}")
+    #     print(f"Min Response Time: {route_stats.min_response_time}")
+    #     print(f"Max Response Time: {route_stats.max_response_time}")
+    #     print(f"Average Content Size: {route_stats.avg_content_length}")
+    #     print(f"p95 Response Time: {route_percentiles[95]}")
+    #     print(f"p99 Response Time: {route_percentiles[99]}")
diff --git a/scripts/ci/run_benchmarks.sh b/scripts/ci/run_benchmarks.sh
index 4a7a3e9c1..884ec80ad 100755
--- a/scripts/ci/run_benchmarks.sh
+++ b/scripts/ci/run_benchmarks.sh
@@ -35,7 +35,15 @@ function run_benchmarks_for_folder {
     OSRM_ROUTED_PID=$!
     curl --retry-delay 3 --retry 10 --retry-all-errors "http://127.0.0.1:5000/route/v1/driving/13.388860,52.517037;13.385983,52.496891?steps=true"
 
-    locust -f $FOLDER/scripts/ci/locustfile.py --headless --users 10 --spawn-rate 1 --host http://localhost:5000 --run-time 1m --csv=results --loglevel ERROR
+    locust -f $FOLDER/scripts/ci/locustfile.py \
+        --headless \
+        --processes -1 \
+        --users 10 \
+        --spawn-rate 1 \
+        --host http://localhost:5000 \
+        --run-time 1m \
+        --csv=results \
+        --loglevel ERROR
 
     echo "STATS: "
     cat results_stats.csv
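
With the route requests now grouped under a single stats entry via name="/route/v1/driving", the commented-out quitting listener could later be restored without the hand-rolled calculate_percentiles helper by reading percentiles straight from Locust's stats API. A minimal sketch, assuming the pinned Locust version exposes StatsEntry.get_response_time_percentile (present in recent releases); report_route_stats is an illustrative name, not part of this change:

from locust import events


@events.quitting.add_listener
def report_route_stats(environment, **kwargs):
    # name="/route/v1/driving" makes every route request aggregate into this one entry.
    route_stats = environment.stats.get("/route/v1/driving", "GET")
    if route_stats.num_requests == 0:
        return
    print("\nAggregated Statistics for /route/v1/driving:")
    print(f"Request Count: {route_stats.num_requests}")
    print(f"Failure Count: {route_stats.num_failures}")
    print(f"Median Response Time: {route_stats.median_response_time}")
    print(f"Average Response Time: {route_stats.avg_response_time}")
    # Built-in percentile helper replaces the manual sort-and-index computation.
    print(f"p95 Response Time: {route_stats.get_response_time_percentile(0.95)}")
    print(f"p99 Response Time: {route_stats.get_response_time_percentile(0.99)}")

As a usage note, --processes -1 (available in newer Locust releases) forks one worker per logical CPU core, with the parent process acting as master and aggregating worker stats before the results*.csv files are written.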