Calculate confidence interval for benchmark measurements (#6950)
This commit is contained in: commit e8da3d9231 (parent d9ce9cf780)

.github/workflows/osrm-backend.yml (vendored, 27 lines changed)
@@ -700,15 +700,6 @@ jobs:
           mkdir -p $HOME/.ccache
           ccache --zero-stats
           ccache --max-size=256M
-      - name: Build PR Branch
-        run: |
-          mkdir -p pr/build
-          cd pr/build
-          cmake -DENABLE_CONAN=ON -DCMAKE_BUILD_TYPE=Release ..
-          make -j$(nproc)
-          make -j$(nproc) benchmarks
-          cd ..
-          make -C test/data
       - name: Checkout Base Branch
         uses: actions/checkout@v4
         with:
@@ -723,9 +714,23 @@ jobs:
           make -j$(nproc) benchmarks
           cd ..
           make -C test/data
-      - name: Run Benchmarks
+      - name: Build PR Branch
         run: |
-          ./pr/scripts/ci/run_benchmarks.sh base pr
+          mkdir -p pr/build
+          cd pr/build
+          cmake -DENABLE_CONAN=ON -DCMAKE_BUILD_TYPE=Release ..
+          make -j$(nproc)
+          make -j$(nproc) benchmarks
+          cd ..
+          make -C test/data
+      - name: Run PR Benchmarks
+        run: |
+          ./pr/scripts/ci/run_benchmarks.sh -f $(pwd)/pr -r $(pwd)/pr_results -s $(pwd)/pr -b $(pwd)/pr/build -o ~/data.osm.pbf -g ~/gps_traces.csv
+      - name: Run Base Benchmarks
+        run: |
+          # we intentionally use scripts from PR branch to be able to update them and see results in the same PR
+          ./pr/scripts/ci/run_benchmarks.sh -f $(pwd)/base -r $(pwd)/base_results -s $(pwd)/pr -b $(pwd)/base/build -o ~/data.osm.pbf -g ~/gps_traces.csv
+
       - name: Post Benchmark Results
         run: |
           python3 pr/scripts/ci/post_benchmark_results.py base_results pr_results

@@ -1,5 +1,4 @@
 import requests
-import sys
 import random
 from collections import defaultdict
 import os
@@ -8,12 +7,13 @@ import numpy as np
 import time
 import argparse

+
 class BenchmarkRunner:
-    def __init__(self):
+    def __init__(self, gps_traces_file_path):
         self.coordinates = []
         self.tracks = defaultdict(list)

-        gps_traces_file_path = os.path.expanduser('~/gps_traces.csv')
+        gps_traces_file_path = os.path.expanduser(gps_traces_file_path)
         with open(gps_traces_file_path, 'r') as file:
             reader = csv.DictReader(file)
             for row in reader:
@@ -36,9 +36,8 @@ class BenchmarkRunner:
             response = requests.get(url)
             end_time = time.time()
             if response.status_code != 200:
-                if benchmark_name == 'match':
-                    code = response.json()['code']
-                    if code == 'NoSegment' or code == 'NoMatch':
-                        continue
+                code = response.json()['code']
+                if code in ['NoSegment', 'NoMatch', 'NoRoute', 'NoTrips']:
+                    continue
                 raise Exception(f"Error: {response.status_code} {response.text}")
             times.append((end_time - start_time) * 1000)  # convert to ms
@@ -54,7 +53,7 @@ class BenchmarkRunner:
             end_coord = f"{end[1]:.6f},{end[0]:.6f}"
             return f"{host}/route/v1/driving/{start_coord};{end_coord}?overview=full&steps=true"
         elif benchmark_name == 'table':
-            num_coords = random.randint(3, 100)
+            num_coords = random.randint(3, 12)
             selected_coords = random.sample(self.coordinates, num_coords)
             coords_str = ";".join([f"{coord[1]:.6f},{coord[0]:.6f}" for coord in selected_coords])
             return f"{host}/table/v1/driving/{coords_str}"
@@ -77,26 +76,63 @@ class BenchmarkRunner:
         else:
             raise Exception(f"Unknown benchmark: {benchmark_name}")

+
+def bootstrap_confidence_interval(data, num_samples=1000, confidence_level=0.95):
+    means = []
+    for _ in range(num_samples):
+        sample = np.random.choice(data, size=len(data), replace=True)
+        means.append(np.mean(sample))
+    lower_bound = np.percentile(means, (1 - confidence_level) / 2 * 100)
+    upper_bound = np.percentile(means, (1 + confidence_level) / 2 * 100)
+    mean = np.mean(means)
+    return mean, lower_bound, upper_bound
+
+def calculate_confidence_interval(data):
+    mean, lower, upper = bootstrap_confidence_interval(data)
+    min_value = np.min(data)
+    return mean, (upper - lower) / 2, min_value
+
+
 def main():
     parser = argparse.ArgumentParser(description='Run GPS benchmark tests.')
     parser.add_argument('--host', type=str, required=True, help='Host URL')
     parser.add_argument('--method', type=str, required=True, choices=['route', 'table', 'match', 'nearest', 'trip'], help='Benchmark method')
     parser.add_argument('--num_requests', type=int, required=True, help='Number of requests to perform')
+    parser.add_argument('--iterations', type=int, required=True, help='Number of iterations to run the benchmark')
+    parser.add_argument('--gps_traces_file_path', type=str, required=True, help='Path to the GPS traces file')

     args = parser.parse_args()

-    random.seed(42)
-
-    runner = BenchmarkRunner()
-    times = runner.run(args.method, args.host, args.num_requests)
+    np.random.seed(42)
+
+    runner = BenchmarkRunner(args.gps_traces_file_path)
+
+    all_times = []
+    for _ in range(args.iterations):
+        random.seed(42)
+        times = runner.run(args.method, args.host, args.num_requests)
+        all_times.append(times)
+    all_times = np.asarray(all_times)

-    print(f'Total: {np.sum(times)}ms')
-    print(f"Min time: {np.min(times)}ms")
-    print(f"Mean time: {np.mean(times)}ms")
-    print(f"Median time: {np.median(times)}ms")
-    print(f"95th percentile: {np.percentile(times, 95)}ms")
-    print(f"99th percentile: {np.percentile(times, 99)}ms")
-    print(f"Max time: {np.max(times)}ms")
+    assert all_times.shape == (args.iterations, all_times.shape[1])
+
+    total_time, total_ci, total_best = calculate_confidence_interval(np.sum(all_times, axis=1))
+    ops_per_sec, ops_per_sec_ci, ops_per_sec_best = calculate_confidence_interval(float(all_times.shape[1]) / np.sum(all_times / 1000, axis=1))
+    min_time, min_ci, _ = calculate_confidence_interval(np.min(all_times, axis=1))
+    mean_time, mean_ci, _ = calculate_confidence_interval(np.mean(all_times, axis=1))
+    median_time, median_ci, _ = calculate_confidence_interval(np.median(all_times, axis=1))
+    perc_95_time, perc_95_ci, _ = calculate_confidence_interval(np.percentile(all_times, 95, axis=1))
+    perc_99_time, perc_99_ci, _ = calculate_confidence_interval(np.percentile(all_times, 99, axis=1))
+    max_time, max_ci, _ = calculate_confidence_interval(np.max(all_times, axis=1))
+
+    print(f'Ops: {ops_per_sec:.2f} ± {ops_per_sec_ci:.2f} ops/s. Best: {ops_per_sec_best:.2f} ops/s')
+    print(f'Total: {total_time:.2f}ms ± {total_ci:.2f}ms. Best: {total_best:.2f}ms')
+    print(f"Min time: {min_time:.2f}ms ± {min_ci:.2f}ms")
+    print(f"Mean time: {mean_time:.2f}ms ± {mean_ci:.2f}ms")
+    print(f"Median time: {median_time:.2f}ms ± {median_ci:.2f}ms")
+    print(f"95th percentile: {perc_95_time:.2f}ms ± {perc_95_ci:.2f}ms")
+    print(f"99th percentile: {perc_99_time:.2f}ms ± {perc_99_ci:.2f}ms")
+    print(f"Max time: {max_time:.2f}ms ± {max_ci:.2f}ms")

 if __name__ == '__main__':
     main()
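
A note on what the new reporting does: each statistic (total, mean, p95, and so on) is computed once per iteration, and bootstrap_confidence_interval then resamples those per-iteration values with replacement to estimate how stable their mean is; the script prints the half-width (upper - lower) / 2 after the "±". A self-contained sketch of the same computation, using made-up timings purely for illustration:

    import numpy as np

    def bootstrap_confidence_interval(data, num_samples=1000, confidence_level=0.95):
        # resample the per-iteration values with replacement and collect the means
        means = [np.mean(np.random.choice(data, size=len(data), replace=True))
                 for _ in range(num_samples)]
        lower = np.percentile(means, (1 - confidence_level) / 2 * 100)  # 2.5th pct
        upper = np.percentile(means, (1 + confidence_level) / 2 * 100)  # 97.5th pct
        return np.mean(means), lower, upper

    np.random.seed(42)
    # pretend we ran 5 iterations and these were the per-iteration totals in ms
    iteration_totals = np.array([1010.0, 995.0, 1003.0, 988.0, 1021.0])
    mean, lower, upper = bootstrap_confidence_interval(iteration_totals)
    # as in the script above, report the half-width as the "±" value
    print(f"Total: {mean:.2f}ms ± {(upper - lower) / 2:.2f}ms")
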
@@ -1,72 +1,121 @@
 #!/bin/bash
 set -eou pipefail

+function usage {
+    echo "Usage: $0 -f <folder> -r <results_folder> -s <scripts_folder> -b <binaries_folder> -o <osm_pbf> -g <gps_traces>"
+    exit 1
+}
+
+while getopts ":f:r:s:b:o:g:" opt; do
+  case $opt in
+    f) FOLDER="$OPTARG"
+    ;;
+    r) RESULTS_FOLDER="$OPTARG"
+    ;;
+    s) SCRIPTS_FOLDER="$OPTARG"
+    ;;
+    b) BINARIES_FOLDER="$OPTARG"
+    ;;
+    o) OSM_PBF="$OPTARG"
+    ;;
+    g) GPS_TRACES="$OPTARG"
+    ;;
+    \?) echo "Invalid option -$OPTARG" >&2
+    usage
+    ;;
+    :) echo "Option -$OPTARG requires an argument." >&2
+    usage
+    ;;
+  esac
+done
+
+if [ -z "${FOLDER:-}" ] || [ -z "${RESULTS_FOLDER:-}" ] || [ -z "${SCRIPTS_FOLDER:-}" ] || [ -z "${BINARIES_FOLDER:-}" ] || [ -z "${OSM_PBF:-}" ] || [ -z "${GPS_TRACES:-}" ]; then
+    usage
+fi
+
 function measure_peak_ram_and_time {
     COMMAND=$1
     OUTPUT_FILE=$2

     if [ "$(uname)" == "Darwin" ]; then
         # on macOS time has different parameters, so simply run command on macOS
         $COMMAND > /dev/null 2>&1
     else
         OUTPUT=$(/usr/bin/time -f "%e %M" $COMMAND 2>&1 | tail -n 1)

         TIME=$(echo $OUTPUT | awk '{print $1}')
         PEAK_RAM_KB=$(echo $OUTPUT | awk '{print $2}')
         PEAK_RAM_MB=$(echo "scale=2; $PEAK_RAM_KB / 1024" | bc)
         echo "Time: ${TIME}s Peak RAM: ${PEAK_RAM_MB}MB" > $OUTPUT_FILE
     fi
 }
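
For context, /usr/bin/time -f "%e %M" is GNU time syntax (Linux only): it prints elapsed wall-clock seconds and peak resident set size in KB, which is why the macOS branch just runs the command without measuring. A rough Python equivalent of this helper, offered only as a sketch and not part of the PR (note that resource.getrusage reports ru_maxrss in KB on Linux but in bytes on macOS):

    import resource
    import subprocess
    import time

    def measure_peak_ram_and_time(command):
        """Wall time plus the waited-for child's peak RSS, like the bash helper."""
        start = time.time()
        subprocess.run(command, check=True, stdout=subprocess.DEVNULL)
        elapsed = time.time() - start
        peak_kb = resource.getrusage(resource.RUSAGE_CHILDREN).ru_maxrss
        return elapsed, peak_kb / 1024.0  # seconds, MB (KB units assume Linux)

    # hypothetical invocation mirroring the script's osrm-extract call:
    # measure_peak_ram_and_time(["build/osrm-extract", "-p", "profiles/car.lua", "data.osm.pbf"])
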
 function run_benchmarks_for_folder {
-    echo "Running benchmarks for $1"
-
-    FOLDER=$1
-    RESULTS_FOLDER=$2
-    SCRIPTS_FOLDER=$3
-
     mkdir -p $RESULTS_FOLDER

-    BENCHMARKS_FOLDER="$FOLDER/build/src/benchmarks"
+    BENCHMARKS_FOLDER="$BINARIES_FOLDER/src/benchmarks"

+    echo "Running match-bench MLD"
+    $BENCHMARKS_FOLDER/match-bench "$FOLDER/test/data/mld/monaco.osrm" mld > "$RESULTS_FOLDER/match_mld.bench"
+    echo "Running match-bench CH"
+    $BENCHMARKS_FOLDER/match-bench "$FOLDER/test/data/ch/monaco.osrm" ch > "$RESULTS_FOLDER/match_ch.bench"
+    echo "Running route-bench MLD"
+    $BENCHMARKS_FOLDER/route-bench "$FOLDER/test/data/mld/monaco.osrm" mld > "$RESULTS_FOLDER/route_mld.bench"
+    echo "Running route-bench CH"
+    $BENCHMARKS_FOLDER/route-bench "$FOLDER/test/data/ch/monaco.osrm" ch > "$RESULTS_FOLDER/route_ch.bench"
+    echo "Running alias"
+    $BENCHMARKS_FOLDER/alias-bench > "$RESULTS_FOLDER/alias.bench"
+    echo "Running json-render-bench"
+    $BENCHMARKS_FOLDER/json-render-bench "$FOLDER/src/benchmarks/portugal_to_korea.json" > "$RESULTS_FOLDER/json-render.bench"
+    echo "Running packedvector-bench"
+    $BENCHMARKS_FOLDER/packedvector-bench > "$RESULTS_FOLDER/packedvector.bench"
+    echo "Running rtree-bench"
+    $BENCHMARKS_FOLDER/rtree-bench "$FOLDER/test/data/monaco.osrm.ramIndex" "$FOLDER/test/data/monaco.osrm.fileIndex" "$FOLDER/test/data/monaco.osrm.nbg_nodes" > "$RESULTS_FOLDER/rtree.bench"

-    ./$BENCHMARKS_FOLDER/match-bench "./$FOLDER/test/data/mld/monaco.osrm" mld > "$RESULTS_FOLDER/match_mld.bench"
-    ./$BENCHMARKS_FOLDER/match-bench "./$FOLDER/test/data/ch/monaco.osrm" ch > "$RESULTS_FOLDER/match_ch.bench"
-    ./$BENCHMARKS_FOLDER/route-bench "./$FOLDER/test/data/mld/monaco.osrm" mld > "$RESULTS_FOLDER/route_mld.bench"
-    ./$BENCHMARKS_FOLDER/route-bench "./$FOLDER/test/data/ch/monaco.osrm" ch > "$RESULTS_FOLDER/route_ch.bench"
-    ./$BENCHMARKS_FOLDER/alias-bench > "$RESULTS_FOLDER/alias.bench"
-    ./$BENCHMARKS_FOLDER/json-render-bench "./$FOLDER/src/benchmarks/portugal_to_korea.json" > "$RESULTS_FOLDER/json-render.bench"
-    ./$BENCHMARKS_FOLDER/packedvector-bench > "$RESULTS_FOLDER/packedvector.bench"
-    ./$BENCHMARKS_FOLDER/rtree-bench "./$FOLDER/test/data/monaco.osrm.ramIndex" "./$FOLDER/test/data/monaco.osrm.fileIndex" "./$FOLDER/test/data/monaco.osrm.nbg_nodes" > "$RESULTS_FOLDER/rtree.bench"
-
-    BINARIES_FOLDER="$FOLDER/build"
-
-    cp ~/data.osm.pbf $FOLDER
+    cp -rf $OSM_PBF $FOLDER/data.osm.pbf

+    echo "Running osrm-extract"
     measure_peak_ram_and_time "$BINARIES_FOLDER/osrm-extract -p $FOLDER/profiles/car.lua $FOLDER/data.osm.pbf" "$RESULTS_FOLDER/osrm_extract.bench"
+    echo "Running osrm-partition"
     measure_peak_ram_and_time "$BINARIES_FOLDER/osrm-partition $FOLDER/data.osrm" "$RESULTS_FOLDER/osrm_partition.bench"
+    echo "Running osrm-customize"
     measure_peak_ram_and_time "$BINARIES_FOLDER/osrm-customize $FOLDER/data.osrm" "$RESULTS_FOLDER/osrm_customize.bench"
+    echo "Running osrm-contract"
     measure_peak_ram_and_time "$BINARIES_FOLDER/osrm-contract $FOLDER/data.osrm" "$RESULTS_FOLDER/osrm_contract.bench"

-    for BENCH in nearest table trip route match; do
-        ./$BENCHMARKS_FOLDER/bench "$FOLDER/data.osrm" mld ~/gps_traces.csv ${BENCH} > "$RESULTS_FOLDER/random_${BENCH}_mld.bench" || true
-        ./$BENCHMARKS_FOLDER/bench "$FOLDER/data.osrm" ch ~/gps_traces.csv ${BENCH} > "$RESULTS_FOLDER/random_${BENCH}_ch.bench" || true
-    done
+    for ALGORITHM in ch mld; do
+        for BENCH in nearest table trip route match; do
+            echo "Running random $BENCH $ALGORITHM"
+            START=$(date +%s.%N)
+            $BENCHMARKS_FOLDER/bench "$FOLDER/data.osrm" $ALGORITHM $GPS_TRACES ${BENCH} > "$RESULTS_FOLDER/random_${BENCH}_${ALGORITHM}.bench" 5 || true
+            END=$(date +%s.%N)
+            DIFF=$(echo "$END - $START" | bc)
+            echo "Took: ${DIFF}s"
+        done
+    done

     for ALGORITHM in ch mld; do
-        $BINARIES_FOLDER/osrm-routed --algorithm $ALGORITHM $FOLDER/data.osrm &
+        $BINARIES_FOLDER/osrm-routed --algorithm $ALGORITHM $FOLDER/data.osrm > /dev/null 2>&1 &
         OSRM_ROUTED_PID=$!

         # wait for osrm-routed to start
-        if ! curl --retry-delay 3 --retry 10 --retry-all-errors "http://127.0.0.1:5000/route/v1/driving/13.388860,52.517037;13.385983,52.496891?steps=true"; then
+        if ! curl --retry-delay 3 --retry 10 --retry-all-errors "http://127.0.0.1:5000/route/v1/driving/13.388860,52.517037;13.385983,52.496891?steps=true" > /dev/null 2>&1; then
             echo "osrm-routed failed to start for algorithm $ALGORITHM"
             kill -9 $OSRM_ROUTED_PID
             continue
         fi

         for METHOD in route nearest trip table match; do
-            python3 $SCRIPTS_FOLDER/scripts/ci/e2e_benchmark.py --host http://localhost:5000 --method $METHOD --num_requests 1000 > $RESULTS_FOLDER/e2e_${METHOD}_${ALGORITHM}.bench
+            echo "Running e2e benchmark for $METHOD $ALGORITHM"
+            START=$(date +%s.%N)
+            python3 $SCRIPTS_FOLDER/scripts/ci/e2e_benchmark.py --host http://localhost:5000 --method $METHOD --iterations 5 --num_requests 1000 --gps_traces_file_path $GPS_TRACES > $RESULTS_FOLDER/e2e_${METHOD}_${ALGORITHM}.bench
+            END=$(date +%s.%N)
+            DIFF=$(echo "$END - $START" | bc)
+            echo "Took: ${DIFF}s"
         done

         kill -9 $OSRM_ROUTED_PID
     done
 }

-run_benchmarks_for_folder $1 "${1}_results" $2
-run_benchmarks_for_folder $2 "${2}_results" $2
+run_benchmarks_for_folder

@@ -45,8 +45,12 @@ class GPSTraces
     std::vector<osrm::util::Coordinate> coordinates;
     mutable std::mt19937 gen;

+    int seed;
+
   public:
-    GPSTraces(int seed) : gen(std::random_device{}()) { gen.seed(seed); }
+    GPSTraces(int seed) : gen(std::random_device{}()), seed(seed) { gen.seed(seed); }
+
+    void resetSeed() const { gen.seed(seed); }

     bool readCSV(const std::string &filename)
     {
@@ -101,75 +105,200 @@ class GPSTraces
         return coordinates[dis(gen)];
     }

-    const std::vector<osrm::util::Coordinate> &getRandomTrace() const
+    std::vector<osrm::util::Coordinate> getRandomTrace() const
     {
         std::uniform_int_distribution<> dis(0, trackIDs.size() - 1);
         auto it = trackIDs.begin();
         std::advance(it, dis(gen));
-        return traces.at(*it);
+
+        const auto &trace = traces.at(*it);
+
+        std::uniform_int_distribution<> length_dis(50, 100);
+        size_t length = length_dis(gen);
+        if (trace.size() <= length + 1)
+        {
+            return trace;
+        }
+
+        std::uniform_int_distribution<> start_dis(0, trace.size() - length - 1);
+        size_t start_index = start_dis(gen);
+
+        return std::vector<osrm::util::Coordinate>(trace.begin() + start_index,
+                                                   trace.begin() + start_index + length);
     }
 };

+// Struct to hold confidence interval data
+struct ConfidenceInterval
+{
+    double mean;
+    double confidence;
+    double min;
+};
+
+// Helper function to calculate the bootstrap confidence interval
+ConfidenceInterval confidenceInterval(const std::vector<double> &data,
+                                      int num_samples = 1000,
+                                      double confidence_level = 0.95)
+{
+    std::vector<double> means;
+    std::default_random_engine generator;
+    std::uniform_int_distribution<int> distribution(0, data.size() - 1);
+
+    for (int i = 0; i < num_samples; ++i)
+    {
+        std::vector<double> sample;
+        for (size_t j = 0; j < data.size(); ++j)
+        {
+            sample.push_back(data[distribution(generator)]);
+        }
+        double sample_mean = std::accumulate(sample.begin(), sample.end(), 0.0) / sample.size();
+        means.push_back(sample_mean);
+    }
+
+    std::sort(means.begin(), means.end());
+    double lower_bound = means[(int)((1 - confidence_level) / 2 * num_samples)];
+    double upper_bound = means[(int)((1 + confidence_level) / 2 * num_samples)];
+    double mean = std::accumulate(means.begin(), means.end(), 0.0) / means.size();
+
+    ConfidenceInterval ci = {
+        mean, (upper_bound - lower_bound) / 2, *std::min_element(data.begin(), data.end())};
+    return ci;
+}
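
The index arithmetic in this C++ helper is the direct analogue of the Python version: with num_samples = 1000 and confidence_level = 0.95, the bounds are the sorted bootstrap means at indices (int)(0.025 * 1000) = 25 and (int)(0.975 * 1000) = 975, i.e. the 2.5th and 97.5th percentiles. A quick Python mirror of the same logic (an illustration, not code from this commit):

    import random

    def confidence_interval(data, num_samples=1000, confidence_level=0.95):
        # mirror of the C++ helper: bootstrap means, then pick order statistics
        means = sorted(
            sum(random.choice(data) for _ in range(len(data))) / len(data)
            for _ in range(num_samples)
        )
        lower = means[int((1 - confidence_level) / 2 * num_samples)]  # index 25
        upper = means[int((1 + confidence_level) / 2 * num_samples)]  # index 975
        mean = sum(means) / len(means)
        return mean, (upper - lower) / 2, min(data)

    print(confidence_interval([12.1, 11.8, 12.4, 11.9, 12.0]))
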
 class Statistics
 {
   public:
-    void push(double timeMs)
-    {
-        times.push_back(timeMs);
-        sorted = false;
-    }
+    explicit Statistics(int iterations) : times(iterations) {}
+
+    void push(double timeMs, int iteration) { times[iteration].push_back(timeMs); }
+
+    ConfidenceInterval mean()
+    {
+        std::vector<double> means;
+        means.reserve(times.size());
+        for (const auto &iter_times : times)
+        {
+            means.push_back(std::accumulate(iter_times.begin(), iter_times.end(), 0.0) /
+                            iter_times.size());
+        }
+        return confidenceInterval(means);
+    }

-    double mean() { return sum() / times.size(); }
-
-    double sum()
-    {
-        double sum = 0;
-        for (auto time : times)
-        {
-            sum += time;
-        }
-        return sum;
-    }
+    ConfidenceInterval total()
+    {
+        std::vector<double> sums;
+        sums.reserve(times.size());
+        for (const auto &iter_times : times)
+        {
+            sums.push_back(std::accumulate(iter_times.begin(), iter_times.end(), 0.0));
+        }
+        return confidenceInterval(sums);
+    }

-    double min() { return *std::min_element(times.begin(), times.end()); }
-
-    double max() { return *std::max_element(times.begin(), times.end()); }
-
-    double percentile(double p)
-    {
-        const auto &times = getTimes();
-        return times[static_cast<size_t>(p * times.size())];
-    }
+    ConfidenceInterval min()
+    {
+        std::vector<double> mins;
+        mins.reserve(times.size());
+        for (const auto &iter_times : times)
+        {
+            mins.push_back(*std::min_element(iter_times.begin(), iter_times.end()));
+        }
+        return confidenceInterval(mins);
+    }
+
+    ConfidenceInterval max()
+    {
+        std::vector<double> maxs;
+        maxs.reserve(times.size());
+        for (const auto &iter_times : times)
+        {
+            maxs.push_back(*std::max_element(iter_times.begin(), iter_times.end()));
+        }
+        return confidenceInterval(maxs);
+    }
+
+    ConfidenceInterval percentile(double p)
+    {
+        std::vector<double> percentiles;
+        percentiles.reserve(times.size());
+        for (const auto &iter_times : times)
+        {
+            auto sorted_times = iter_times;
+            std::sort(sorted_times.begin(), sorted_times.end());
+            percentiles.push_back(sorted_times[static_cast<size_t>(p * sorted_times.size())]);
+        }
+        return confidenceInterval(percentiles);
+    }
+
+    ConfidenceInterval ops_per_sec()
+    {
+        std::vector<double> ops;
+        ops.reserve(times.size());
+        for (const auto &iter_times : times)
+        {
+            double total_time = std::accumulate(iter_times.begin(), iter_times.end(), 0.0) / 1000.0;
+            ops.push_back(iter_times.size() / total_time);
+        }
+        return confidenceInterval(ops);
+    }

   private:
-    std::vector<double> getTimes()
-    {
-        if (!sorted)
-        {
-            std::sort(times.begin(), times.end());
-            sorted = true;
-        }
-        return times;
-    }
-
-    std::vector<double> times;
-
-    bool sorted = false;
+    // vector of times for each iteration
+    std::vector<std::vector<double>> times;
 };

 std::ostream &operator<<(std::ostream &os, Statistics &statistics)
 {
     os << std::fixed << std::setprecision(2);
-    os << "total: " << statistics.sum() << "ms" << std::endl;
-    os << "avg: " << statistics.mean() << "ms" << std::endl;
-    os << "min: " << statistics.min() << "ms" << std::endl;
-    os << "max: " << statistics.max() << "ms" << std::endl;
-    os << "p99: " << statistics.percentile(0.99) << "ms" << std::endl;
+
+    ConfidenceInterval mean_ci = statistics.mean();
+    ConfidenceInterval total_ci = statistics.total();
+    ConfidenceInterval min_ci = statistics.min();
+    ConfidenceInterval max_ci = statistics.max();
+    ConfidenceInterval p99_ci = statistics.percentile(0.99);
+    ConfidenceInterval ops_ci = statistics.ops_per_sec();
+
+    os << "ops: " << ops_ci.mean << " ± " << ops_ci.confidence << " ops/s. "
+       << "best: " << ops_ci.min << "ops/s." << std::endl;
+    os << "total: " << total_ci.mean << " ± " << total_ci.confidence << "ms. "
+       << "best: " << total_ci.min << "ms." << std::endl;
+    os << "avg: " << mean_ci.mean << " ± " << mean_ci.confidence << "ms" << std::endl;
+    os << "min: " << min_ci.mean << " ± " << min_ci.confidence << "ms" << std::endl;
+    os << "max: " << max_ci.mean << " ± " << max_ci.confidence << "ms" << std::endl;
+    os << "p99: " << p99_ci.mean << " ± " << p99_ci.confidence << "ms" << std::endl;

     return os;
 }

-void runRouteBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces)
+template <typename Benchmark, typename BenchmarkBody>
+void runBenchmarks(const std::vector<Benchmark> &benchmarks,
+                   int iterations,
+                   int opsPerIteration,
+                   const OSRM &osrm,
+                   const GPSTraces &gpsTraces,
+                   const BenchmarkBody &benchmarkBody)
+{
+    for (const auto &benchmark : benchmarks)
+    {
+        Statistics statistics{iterations};
+        for (int iteration = 0; iteration < iterations; ++iteration)
+        {
+            gpsTraces.resetSeed();
+
+            for (int i = 0; i < opsPerIteration; ++i)
+            {
+                benchmarkBody(iteration, benchmark, osrm, gpsTraces, statistics);
+            }
+        }
+        std::cout << benchmark.name << std::endl;
+        std::cout << statistics << std::endl;
+    }
+}
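
The shape of this template is the point of the PR: gpsTraces.resetSeed() at the top of every iteration replays the identical query sequence, so each iteration measures the same workload and the spread across iterations reflects measurement noise, which is exactly what the confidence interval then quantifies. A toy Python sketch of the same control flow (hypothetical names, not code from this commit):

    import random

    class Statistics:
        """Per-iteration buckets, as in the C++ class above."""
        def __init__(self, iterations):
            self.times = [[] for _ in range(iterations)]
        def push(self, time_ms, iteration):
            self.times[iteration].append(time_ms)

    def run_benchmarks(benchmarks, iterations, ops_per_iteration, body):
        for name in benchmarks:
            stats = Statistics(iterations)
            for iteration in range(iterations):
                random.seed(42)  # analogous to gpsTraces.resetSeed()
                for _ in range(ops_per_iteration):
                    body(iteration, stats)
            per_iter_means = [sum(t) / len(t) for t in stats.times]
            print(name, per_iter_means)  # every iteration saw the same queries

    # toy body: pretend each op takes 1-2ms depending on the (seeded) query
    run_benchmarks(["toy"], iterations=3, ops_per_iteration=5,
                   body=lambda it, s: s.push(1.0 + random.random(), it))
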
+void runRouteBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces, int iterations)
 {
-
     struct Benchmark
     {
         std::string name;
@@ -179,13 +308,38 @@ void runRouteBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces)
         std::optional<size_t> alternatives = std::nullopt;
         std::optional<double> radius = std::nullopt;
     };
+    std::vector<Benchmark> benchmarks = {
+        {"1000 routes, 3 coordinates, no alternatives, overview=full, steps=true",
+         3,
+         RouteParameters::OverviewType::Full,
+         true,
+         std::nullopt},
+        {"1000 routes, 2 coordinates, 3 alternatives, overview=full, steps=true",
+         2,
+         RouteParameters::OverviewType::Full,
+         true,
+         3},
+        {"1000 routes, 3 coordinates, no alternatives, overview=false, steps=false",
+         3,
+         RouteParameters::OverviewType::False,
+         false,
+         std::nullopt},
+        {"1000 routes, 2 coordinates, 3 alternatives, overview=false, steps=false",
+         2,
+         RouteParameters::OverviewType::False,
+         false,
+         3}};

-    auto run_benchmark = [&](const Benchmark &benchmark)
-    {
-        Statistics statistics;
-
-        auto NUM = 10000;
-        for (int i = 0; i < NUM; ++i)
+    runBenchmarks(benchmarks,
+                  iterations,
+                  1000,
+                  osrm,
+                  gpsTraces,
+                  [](int iteration,
+                     const Benchmark &benchmark,
+                     const OSRM &osrm,
+                     const GPSTraces &gpsTraces,
+                     Statistics &statistics)
                   {
                       RouteParameters params;
                       params.overview = benchmark.overview;
@@ -212,10 +366,9 @@ void runRouteBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces)
                       const auto rc = osrm.Route(params, result);
                       TIMER_STOP(routes);

-        statistics.push(TIMER_MSEC(routes));
-
                       auto &json_result = std::get<json::Object>(result);
-        if (rc != Status::Ok || json_result.values.find("routes") == json_result.values.end())
+                      if (rc != Status::Ok ||
+                          json_result.values.find("routes") == json_result.values.end())
                       {
                           auto code = std::get<json::String>(json_result.values["code"]).value;
                           if (code != "NoSegment" && code != "NoRoute")
@@ -223,70 +376,15 @@ void runRouteBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces)
                               throw std::runtime_error{"Couldn't route: " + code};
                           }
                       }
-        }
-        std::cout << benchmark.name << std::endl;
-        std::cout << statistics << std::endl;
-    };
-
-    std::vector<Benchmark> benchmarks = {
-        {"10000 routes, 3 coordinates, no alternatives, overview=full, steps=true",
-         3,
-         RouteParameters::OverviewType::Full,
-         true,
-         std::nullopt},
-        {"10000 routes, 2 coordinates, no alternatives, overview=full, steps=true",
-         2,
-         RouteParameters::OverviewType::Full,
-         true,
-         std::nullopt},
-        {"10000 routes, 2 coordinates, 3 alternatives, overview=full, steps=true",
-         2,
-         RouteParameters::OverviewType::Full,
-         true,
-         3},
-        {"10000 routes, 3 coordinates, no alternatives, overview=false, steps=false",
-         3,
-         RouteParameters::OverviewType::False,
-         false,
-         std::nullopt},
-        {"10000 routes, 2 coordinates, no alternatives, overview=false, steps=false",
-         2,
-         RouteParameters::OverviewType::False,
-         false,
-         std::nullopt},
-        {"10000 routes, 2 coordinates, 3 alternatives, overview=false, steps=false",
-         2,
-         RouteParameters::OverviewType::False,
-         false,
-         3},
-        {"10000 routes, 3 coordinates, no alternatives, overview=false, steps=false, radius=750",
-         3,
-         RouteParameters::OverviewType::False,
-         false,
-         std::nullopt,
-         750},
-        {"10000 routes, 2 coordinates, no alternatives, overview=false, steps=false, radius=750",
-         2,
-         RouteParameters::OverviewType::False,
-         false,
-         std::nullopt,
-         750},
-        {"10000 routes, 2 coordinates, 3 alternatives, overview=false, steps=false, radius=750",
-         2,
-         RouteParameters::OverviewType::False,
-         false,
-         3,
-         750}
-
-    };
-
-    for (const auto &benchmark : benchmarks)
+                      else
                       {
-        run_benchmark(benchmark);
+                          statistics.push(TIMER_MSEC(routes), iteration);
                       }
-    }
+                  });
 }

-void runMatchBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces)
+void runMatchBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces, int iterations)
 {
     struct Benchmark
     {
@@ -294,12 +392,20 @@ void runMatchBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces)
         std::optional<size_t> radius = std::nullopt;
     };

-    auto run_benchmark = [&](const Benchmark &benchmark)
-    {
-        Statistics statistics;
-
-        auto NUM = 1000;
-        for (int i = 0; i < NUM; ++i)
+    std::vector<Benchmark> benchmarks = {{"500 matches, default radius"},
+                                         {"500 matches, radius=10", 10},
+                                         {"500 matches, radius=20", 20}};
+
+    runBenchmarks(benchmarks,
+                  iterations,
+                  500,
+                  osrm,
+                  gpsTraces,
+                  [](int iteration,
+                     const Benchmark &benchmark,
+                     const OSRM &osrm,
+                     const GPSTraces &gpsTraces,
+                     Statistics &statistics)
                   {
                       engine::api::ResultT result = json::Object();

@@ -318,8 +424,6 @@ void runMatchBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces)
                       const auto rc = osrm.Match(params, result);
                       TIMER_STOP(match);

-        statistics.push(TIMER_MSEC(match));
-
                       auto &json_result = std::get<json::Object>(result);
                       if (rc != Status::Ok ||
                           json_result.values.find("matchings") == json_result.values.end())
@@ -330,23 +434,14 @@ void runMatchBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces)
                               throw std::runtime_error{"Couldn't route: " + code};
                           }
                       }
-        }
-
-        std::cout << benchmark.name << std::endl;
-        std::cout << statistics << std::endl;
-    };
-
-    std::vector<Benchmark> benchmarks = {{"1000 matches, default radius"},
-                                         {"1000 matches, radius=10", 10},
-                                         {"1000 matches, radius=20", 20}};
-
-    for (const auto &benchmark : benchmarks)
+                      else
                       {
-        run_benchmark(benchmark);
+                          statistics.push(TIMER_MSEC(match), iteration);
                       }
+                  });
 }

-void runNearestBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces)
+void runNearestBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces, int iterations)
 {
     struct Benchmark
     {
@@ -354,11 +449,20 @@ void runNearestBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces)
         std::optional<size_t> number_of_results = std::nullopt;
     };

-    auto run_benchmark = [&](const Benchmark &benchmark)
-    {
-        Statistics statistics;
-        auto NUM = 10000;
-        for (int i = 0; i < NUM; ++i)
+    std::vector<Benchmark> benchmarks = {{"10000 nearest, number_of_results=1", 1},
+                                         {"10000 nearest, number_of_results=5", 5},
+                                         {"10000 nearest, number_of_results=10", 10}};
+
+    runBenchmarks(benchmarks,
+                  iterations,
+                  10000,
+                  osrm,
+                  gpsTraces,
+                  [](int iteration,
+                     const Benchmark &benchmark,
+                     const OSRM &osrm,
+                     const GPSTraces &gpsTraces,
+                     Statistics &statistics)
                   {
                       engine::api::ResultT result = json::Object();
                       NearestParameters params;
@@ -373,8 +477,6 @@ void runNearestBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces)
                       const auto rc = osrm.Nearest(params, result);
                       TIMER_STOP(nearest);

-        statistics.push(TIMER_MSEC(nearest));
-
                       auto &json_result = std::get<json::Object>(result);
                       if (rc != Status::Ok ||
                           json_result.values.find("waypoints") == json_result.values.end())
@@ -385,23 +487,14 @@ void runNearestBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces)
                               throw std::runtime_error{"Couldn't find nearest point"};
                           }
                       }
-    }
-
-        std::cout << benchmark.name << std::endl;
-        std::cout << statistics << std::endl;
-    };
-
-    std::vector<Benchmark> benchmarks = {{"10000 nearest, number_of_results=1", 1},
-                                         {"10000 nearest, number_of_results=5", 5},
-                                         {"10000 nearest, number_of_results=10", 10}};
-
-    for (const auto &benchmark : benchmarks)
+                      else
                       {
-        run_benchmark(benchmark);
+                          statistics.push(TIMER_MSEC(nearest), iteration);
                       }
+                  });
 }

-void runTripBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces)
+void runTripBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces, int iterations)
 {
     struct Benchmark
     {
@@ -409,11 +502,21 @@ void runTripBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces)
         size_t coordinates;
     };

-    auto run_benchmark = [&](const Benchmark &benchmark)
-    {
-        Statistics statistics;
-        auto NUM = 1000;
-        for (int i = 0; i < NUM; ++i)
+    std::vector<Benchmark> benchmarks = {
+        {"250 trips, 3 coordinates", 3},
+        {"250 trips, 5 coordinates", 5},
+    };
+
+    runBenchmarks(benchmarks,
+                  iterations,
+                  250,
+                  osrm,
+                  gpsTraces,
+                  [](int iteration,
+                     const Benchmark &benchmark,
+                     const OSRM &osrm,
+                     const GPSTraces &gpsTraces,
+                     Statistics &statistics)
                   {
                       engine::api::ResultT result = json::Object();
                       TripParameters params;
@@ -428,10 +531,9 @@ void runTripBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces)
                       const auto rc = osrm.Trip(params, result);
                       TIMER_STOP(trip);

-        statistics.push(TIMER_MSEC(trip));
-
                       auto &json_result = std::get<json::Object>(result);
-        if (rc != Status::Ok || json_result.values.find("trips") == json_result.values.end())
+                      if (rc != Status::Ok ||
+                          json_result.values.find("trips") == json_result.values.end())
                       {
                           auto code = std::get<json::String>(json_result.values["code"]).value;
                           if (code != "NoSegment")
@@ -439,24 +541,13 @@ void runTripBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces)
                               throw std::runtime_error{"Couldn't find trip"};
                           }
                       }
-    }
-
-        std::cout << benchmark.name << std::endl;
-        std::cout << statistics << std::endl;
-    };
-
-    std::vector<Benchmark> benchmarks = {
-        {"1000 trips, 3 coordinates", 3},
-        {"1000 trips, 4 coordinates", 4},
-        {"1000 trips, 5 coordinates", 5},
-    };
-
-    for (const auto &benchmark : benchmarks)
+                      else
                       {
-        run_benchmark(benchmark);
+                          statistics.push(TIMER_MSEC(trip), iteration);
                       }
+                  });
 }

-void runTableBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces)
+void runTableBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces, int iterations)
 {
     struct Benchmark
     {
@@ -464,11 +555,20 @@ void runTableBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces)
         size_t coordinates;
     };

-    auto run_benchmark = [&](const Benchmark &benchmark)
-    {
-        Statistics statistics;
-        auto NUM = 250;
-        for (int i = 0; i < NUM; ++i)
+    std::vector<Benchmark> benchmarks = {{"250 tables, 3 coordinates", 3},
+                                         {"250 tables, 25 coordinates", 25},
+                                         {"250 tables, 50 coordinates", 50}};
+
+    runBenchmarks(benchmarks,
+                  iterations,
+                  250,
+                  osrm,
+                  gpsTraces,
+                  [](int iteration,
+                     const Benchmark &benchmark,
+                     const OSRM &osrm,
+                     const GPSTraces &gpsTraces,
+                     Statistics &statistics)
                   {
                       engine::api::ResultT result = json::Object();
                       TableParameters params;
@@ -482,7 +582,7 @@ void runTableBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces)
                       const auto rc = osrm.Table(params, result);
                       TIMER_STOP(table);

-        statistics.push(TIMER_MSEC(table));
+                      statistics.push(TIMER_MSEC(table), iteration);

                       auto &json_result = std::get<json::Object>(result);
                       if (rc != Status::Ok ||
@@ -494,21 +594,7 @@ void runTableBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces)
                               throw std::runtime_error{"Couldn't compute table"};
                           }
                       }
-    }
-
-        std::cout << benchmark.name << std::endl;
-        std::cout << statistics << std::endl;
-    };
-
-    std::vector<Benchmark> benchmarks = {{"250 tables, 3 coordinates", 3},
-                                         {"250 tables, 25 coordinates", 25},
-                                         {"250 tables, 50 coordinates", 50},
-                                         {"250 tables, 100 coordinates", 100}};
-
-    for (const auto &benchmark : benchmarks)
-    {
-        run_benchmark(benchmark);
-    }
+                  });
 }

 } // namespace
@@ -516,11 +602,11 @@ void runTableBenchmark(const OSRM &osrm, const GPSTraces &gpsTraces)
 int main(int argc, const char *argv[])
 try
 {
-    if (argc < 5)
+    if (argc < 6)
     {
-        std::cerr
-            << "Usage: " << argv[0]
-            << " data.osrm <mld|ch> <path to GPS traces.csv> <route|match|trip|table|nearest>\n";
+        std::cerr << "Usage: " << argv[0]
+                  << " data.osrm <mld|ch> <path to GPS traces.csv> "
+                     "<route|match|trip|table|nearest> <number_of_iterations>\n";
         return EXIT_FAILURE;
     }

@@ -537,27 +623,29 @@ try
     GPSTraces gpsTraces{42};
     gpsTraces.readCSV(argv[3]);

+    int iterations = std::stoi(argv[5]);
+
     const auto benchmarkToRun = std::string{argv[4]};

     if (benchmarkToRun == "route")
     {
-        runRouteBenchmark(osrm, gpsTraces);
+        runRouteBenchmark(osrm, gpsTraces, iterations);
     }
     else if (benchmarkToRun == "match")
     {
-        runMatchBenchmark(osrm, gpsTraces);
+        runMatchBenchmark(osrm, gpsTraces, iterations);
     }
     else if (benchmarkToRun == "nearest")
     {
-        runNearestBenchmark(osrm, gpsTraces);
+        runNearestBenchmark(osrm, gpsTraces, iterations);
     }
     else if (benchmarkToRun == "trip")
     {
-        runTripBenchmark(osrm, gpsTraces);
+        runTripBenchmark(osrm, gpsTraces, iterations);
     }
     else if (benchmarkToRun == "table")
     {
-        runTableBenchmark(osrm, gpsTraces);
+        runTableBenchmark(osrm, gpsTraces, iterations);
     }
     else
     {
@@ -254,7 +254,7 @@ try
                   << std::endl;
     };

-    for (auto radius : std::vector<std::optional<double>>{std::nullopt, 5.0, 10.0, 15.0, 30.0})
+    for (auto radius : std::vector<std::optional<double>>{std::nullopt, 10.0})
     {
         run_benchmark(radius);
     }
@@ -96,12 +96,6 @@ try
          RouteParameters::OverviewType::Full,
          true,
          std::nullopt},
-        {"1000 routes, 2 coordinates, no alternatives, overview=full, steps=true",
-         {{FloatLongitude{7.437602352715465}, FloatLatitude{43.75030522209604}},
-          {FloatLongitude{7.412303912230966}, FloatLatitude{43.72851046529198}}},
-         RouteParameters::OverviewType::Full,
-         true,
-         std::nullopt},
         {"1000 routes, 2 coordinates, 3 alternatives, overview=full, steps=true",
          {{FloatLongitude{7.437602352715465}, FloatLatitude{43.75030522209604}},
           {FloatLongitude{7.412303912230966}, FloatLatitude{43.72851046529198}}},
@@ -115,40 +109,12 @@ try
          RouteParameters::OverviewType::False,
          false,
          std::nullopt},
-        {"1000 routes, 2 coordinates, no alternatives, overview=false, steps=false",
-         {{FloatLongitude{7.437602352715465}, FloatLatitude{43.75030522209604}},
-          {FloatLongitude{7.412303912230966}, FloatLatitude{43.72851046529198}}},
-         RouteParameters::OverviewType::False,
-         false,
-         std::nullopt},
         {"1000 routes, 2 coordinates, 3 alternatives, overview=false, steps=false",
          {{FloatLongitude{7.437602352715465}, FloatLatitude{43.75030522209604}},
           {FloatLongitude{7.412303912230966}, FloatLatitude{43.72851046529198}}},
          RouteParameters::OverviewType::False,
          false,
          3},
-        {"1000 routes, 3 coordinates, no alternatives, overview=false, steps=false, radius=750",
-         {{FloatLongitude{7.437602352715465}, FloatLatitude{43.75030522209604}},
-          {FloatLongitude{7.421844922513342}, FloatLatitude{43.73690777888953}},
-          {FloatLongitude{7.412303912230966}, FloatLatitude{43.72851046529198}}},
-         RouteParameters::OverviewType::False,
-         false,
-         std::nullopt,
-         750},
-        {"1000 routes, 2 coordinates, no alternatives, overview=false, steps=false, radius=750",
-         {{FloatLongitude{7.437602352715465}, FloatLatitude{43.75030522209604}},
-          {FloatLongitude{7.412303912230966}, FloatLatitude{43.72851046529198}}},
-         RouteParameters::OverviewType::False,
-         false,
-         std::nullopt,
-         750},
-        {"1000 routes, 2 coordinates, 3 alternatives, overview=false, steps=false, radius=750",
-         {{FloatLongitude{7.437602352715465}, FloatLatitude{43.75030522209604}},
-          {FloatLongitude{7.412303912230966}, FloatLatitude{43.72851046529198}}},
-         RouteParameters::OverviewType::False,
-         false,
-         3,
-         750}

     };