diff --git a/.github/workflows/osrm-backend.yml b/.github/workflows/osrm-backend.yml
index 2fe0647a6..9c365010f 100644
--- a/.github/workflows/osrm-backend.yml
+++ b/.github/workflows/osrm-backend.yml
@@ -601,6 +601,7 @@ jobs:
   # - run: echo "CI complete"
 
   benchmarks:
+    if: github.event_name == 'pull_request'
     runs-on: ubuntu-22.04
     env:
       CCOMPILER: clang-13
diff --git a/scripts/ci/post_benchmark_results.py b/scripts/ci/post_benchmark_results.py
index d0e16180c..331c1cee8 100644
--- a/scripts/ci/post_benchmark_results.py
+++ b/scripts/ci/post_benchmark_results.py
@@ -10,8 +10,8 @@ PR_NUMBER = os.getenv('PR_NUMBER')
 REPO_OWNER, REPO_NAME = REPO.split('/')
 
 def create_markdown_table(results):
-    header = "| Benchmark | Master | PR |\n|--------|----|"
-    rows = [f"| {result['name']} | {result['master']} | {result['pr']} |" for result in results]
+    header = "| Benchmark | Base | PR |\n|--------|----|"
+    rows = [f"| {result['name']} | {result['base']} | {result['pr']} |" for result in results]
     return f"{header}\n" + "\n".join(rows)
 
 def get_pr_comments(repo_owner, repo_name, pr_number):
@@ -30,15 +30,15 @@ def update_comment(comment_id, repo_owner, repo_name, body):
     return response.json()
 
 
-def collect_benchmark_results(master_folder, pr_folder):
+def collect_benchmark_results(base_folder, pr_folder):
     results = []
     results_index = {}
-    for file in os.listdir(master_folder):
+    for file in os.listdir(base_folder):
         if not file.endswith('.bench'):
             continue
-        with open(f"{master_folder}/{file}") as f:
+        with open(f"{base_folder}/{file}") as f:
             result = f.read().strip()
-        results.append({'master': result, 'pr': None, 'name': os.path.splitext(file)[0]})
+        results.append({'base': result, 'pr': None, 'name': os.path.splitext(file)[0]})
         results_index[file] = len(results) - 1
 
     for file in os.listdir(pr_folder):
@@ -48,39 +48,41 @@
         if file in results_index:
             results[results_index[file]]['pr'] = result
         else:
-            results.append({'master': None, 'pr': result, 'name': os.path.splitext(file)[0]})
+            results.append({'base': None, 'pr': result, 'name': os.path.splitext(file)[0]})
 
 
 def main():
     if len(sys.argv) != 3:
-        print("Usage: python post_benchmark_results.py <master_folder> <pr_folder>")
+        print("Usage: python post_benchmark_results.py <base_folder> <pr_folder>")
         exit(1)
 
-    master_folder = sys.argv[1]
+    base_folder = sys.argv[1]
     pr_folder = sys.argv[2]
 
-    benchmark_results = collect_benchmark_results(master_folder, pr_folder)
+    benchmark_results = collect_benchmark_results(base_folder, pr_folder)
 
     comments = get_pr_comments(REPO_OWNER, REPO_NAME, PR_NUMBER)
-    if comments and len(comments) > 0:
-        first_comment = comments[0]
-        markdown_table = create_markdown_table(benchmark_results)
-        new_benchmark_section = f"\n## Benchmark Results\n{markdown_table}\n"
-
-        if re.search(r'.*', first_comment['body'], re.DOTALL):
-            updated_body = re.sub(
-                r'.*',
-                new_benchmark_section,
-                first_comment['body'],
-                flags=re.DOTALL
-            )
-        else:
-            updated_body = f"{first_comment['body']}\n\n{new_benchmark_section}"
-
-        update_comment(first_comment['id'], REPO_OWNER, REPO_NAME, updated_body)
-        print("PR comment updated successfully.")
-    else:
+    if not comments or len(comments) == 0:
         print("No comments found on this PR.")
         exit(1)
+
+    first_comment = comments[0]
+    markdown_table = create_markdown_table(benchmark_results)
+    new_benchmark_section = f"\n## Benchmark Results\n{markdown_table}\n"
+
+    if re.search(r'.*', first_comment['body'], re.DOTALL):
+        updated_body = re.sub(
+            r'.*',
+            new_benchmark_section,
+            first_comment['body'],
+            flags=re.DOTALL
+        )
+    else:
+        updated_body = f"{first_comment['body']}\n\n{new_benchmark_section}"
+
+    update_comment(first_comment['id'], REPO_OWNER, REPO_NAME, updated_body)
+    print("PR comment updated successfully.")
+
+
 if __name__ == "__main__":
     main()
diff --git a/scripts/ci/run_benchmarks.sh b/scripts/ci/run_benchmarks.sh
index 5d3b0e389..a747e2147 100755
--- a/scripts/ci/run_benchmarks.sh
+++ b/scripts/ci/run_benchmarks.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 set -e pipefail
 
 function run_benchmarks_for_folder {
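
For reference, a rough sketch of running the renamed comment-posting script locally; every value below is a placeholder chosen for illustration. The patch only shows that PR_NUMBER is read from the environment, so the REPO variable and the folder names are assumptions, not part of this change:

    # Hypothetical local run; placeholders only, not taken from this patch.
    export PR_NUMBER=1234                      # PR whose first comment gets the benchmark table
    export REPO=Project-OSRM/osrm-backend      # assumed source of REPO_OWNER / REPO_NAME
    python scripts/ci/post_benchmark_results.py base_results pr_results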