Merge pull request #3788 from Project-OSRM/mld/routing
MLD shortest direct path search
Commit: af65ccd054
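This PR wires up the first MLD query kind: a direct (no-via) shortest path search. For orientation, below is a minimal, self-contained sketch of the bidirectional Dijkstra skeleton such a query runs on. Everything here — graph layout, type names, the termination rule — is an illustrative assumption, not the OSRM API; the real MLD search additionally restricts edge relaxation by partition cell level (C++17).

    // Hypothetical sketch of a bidirectional direct shortest path search.
    #include <cassert>
    #include <functional>
    #include <limits>
    #include <queue>
    #include <utility>
    #include <vector>

    using NodeID = unsigned;
    using EdgeWeight = int;
    constexpr EdgeWeight INVALID_WEIGHT = std::numeric_limits<EdgeWeight>::max();

    struct Edge { NodeID to; EdgeWeight weight; };
    using Graph = std::vector<std::vector<Edge>>; // adjacency lists
    using PQ = std::priority_queue<std::pair<EdgeWeight, NodeID>,
                                   std::vector<std::pair<EdgeWeight, NodeID>>,
                                   std::greater<>>; // min-heap on weight

    // Settle one node of one direction and relax its outgoing edges.
    static void step(const Graph &g, PQ &heap, std::vector<EdgeWeight> &dist,
                     const std::vector<EdgeWeight> &other_dist, EdgeWeight &upper_bound)
    {
        auto [w, node] = heap.top();
        heap.pop();
        if (w > dist[node])
            return; // stale heap entry
        if (other_dist[node] != INVALID_WEIGHT) // the two searches met here
            upper_bound = std::min(upper_bound, w + other_dist[node]);
        for (const auto &e : g[node])
            if (dist[e.to] == INVALID_WEIGHT || w + e.weight < dist[e.to])
            {
                dist[e.to] = w + e.weight;
                heap.push({dist[e.to], e.to});
            }
    }

    EdgeWeight directShortestPath(const Graph &fwd, const Graph &rev, NodeID s, NodeID t)
    {
        std::vector<EdgeWeight> df(fwd.size(), INVALID_WEIGHT), dr(fwd.size(), INVALID_WEIGHT);
        PQ hf, hr;
        df[s] = 0; hf.push({0, s});
        dr[t] = 0; hr.push({0, t});
        EdgeWeight upper_bound = INVALID_WEIGHT;
        while (!hf.empty() || !hr.empty())
        {
            // Conservative stop: both frontiers already exceed the best known path.
            const EdgeWeight ftop = hf.empty() ? INVALID_WEIGHT : hf.top().first;
            const EdgeWeight rtop = hr.empty() ? INVALID_WEIGHT : hr.top().first;
            if (upper_bound != INVALID_WEIGHT && std::min(ftop, rtop) >= upper_bound)
                break;
            if (!hf.empty()) step(fwd, hf, df, dr, upper_bound);
            if (!hr.empty()) step(rev, hr, dr, df, upper_bound);
        }
        return upper_bound;
    }

    int main()
    {
        Graph fwd(3), rev(3);
        fwd[0] = {{1, 2}}; fwd[1] = {{2, 3}}; // 0 -> 1 -> 2
        rev[2] = {{1, 3}}; rev[1] = {{0, 2}}; // reversed edges for the backward search
        assert(directShortestPath(fwd, rev, 0, 2) == 5);
    }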
@@ -2,8 +2,6 @@ module.exports = {
     default: '--strict --tags ~@stress --tags ~@todo --require features/support --require features/step_definitions',
     verify: '--strict --tags ~@stress --tags ~@todo -f progress --require features/support --require features/step_definitions',
     todo: '--strict --tags @todo --require features/support --require features/step_definitions',
-    all: '--strict --require features/support --require features/step_definitions'
+    all: '--strict --require features/support --require features/step_definitions',
+    mld: '--strict --tags ~@stress --tags ~@todo --tags ~@match --tags ~@alternative --tags ~@matrix --tags ~@trip --tags ~@via --require features/support --require features/step_definitions -f progress'
 }
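The new mld profile can then be selected when invoking the suite (cucumber-js accepts a -p/--profile flag). The extra ~@match, ~@alternative, ~@matrix, ~@trip and ~@via exclusions skip the plugins this PR does not yet implement for MLD — only the direct shortest path search is wired up at this stage.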
@@ -5,7 +5,7 @@ Feature: Basic Routing
         Given the profile "car"
         Given a grid size of 500 meters

-    @smallest
+    @smallest @via
     Scenario: Summaries when routing on a simple network
         Given the node map
             """
@@ -87,6 +87,7 @@ Feature: Traffic - turn penalties
             # hkl left turn
             # ade left turn
         And the contract extra arguments "--turn-penalty-file {penalties_file}"
+        And the customize extra arguments "--turn-penalty-file {penalties_file}"
         When I route I should get
             | from | to | route  | speed   | weight | time    |
             | a    | h  | ad,dhk | 65 km/h | 11     | 11s +-1 |
@@ -109,8 +110,9 @@ Feature: Traffic - turn penalties
             # double left - hdc penalty ever so slightly higher than imn; forces all the way around

     Scenario: Too-negative penalty clamps, but does not fail
-        Given the contract extra arguments "--turn-penalty-file {penalties_file}"
-        And the profile "testbot"
+        Given the profile "testbot"
+        And the contract extra arguments "--turn-penalty-file {penalties_file}"
+        And the customize extra arguments "--turn-penalty-file {penalties_file}"
         And the turn penalty file
             """
             1,4,5,-10
@@ -53,6 +53,7 @@ Feature: Car - weights
             | ec    | service | yes |
         And the extract extra arguments "--generate-edge-lookup"
         And the contract extra arguments "--segment-speed-file {speeds_file}"
+        And the customize extra arguments "--segment-speed-file {speeds_file}"
         And the speed file
             """
             2,5,8
@@ -77,7 +77,8 @@ class OSRMDirectLoader extends OSRMBaseLoader {
     osrmUp (callback) {
         if (this.osrmIsRunning()) return callback(new Error("osrm-routed already running!"));

-        this.child = this.scope.runBin('osrm-routed', util.format("%s -p %d", this.inputFile, this.scope.OSRM_PORT), this.scope.environment, (err) => {
+        const command_arguments = util.format('%s -p %d -a %s', this.inputFile, this.scope.OSRM_PORT, this.scope.ROUTING_ALGORITHM);
+        this.child = this.scope.runBin('osrm-routed', command_arguments, this.scope.environment, (err) => {
             if (err && err.signal !== 'SIGINT') {
                 this.child = null;
                 throw new Error(util.format('osrm-routed %s: %s', errorReason(err), err.cmd));
@@ -115,7 +116,8 @@ class OSRMDatastoreLoader extends OSRMBaseLoader {
     osrmUp (callback) {
         if (this.osrmIsRunning()) return callback();

-        this.child = this.scope.runBin('osrm-routed', util.format('--shared-memory=1 -p %d', this.scope.OSRM_PORT), this.scope.environment, (err) => {
+        const command_arguments = util.format('--shared-memory=1 -p %d -a %s', this.scope.OSRM_PORT, this.scope.ROUTING_ALGORITHM);
+        this.child = this.scope.runBin('osrm-routed', command_arguments, this.scope.environment, (err) => {
             if (err && err.signal !== 'SIGINT') {
                 this.child = null;
                 throw new Error(util.format('osrm-routed %s: %s', errorReason(err), err.cmd));
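The new -a argument forwards the routing algorithm to osrm-routed; this.scope.ROUTING_ALGORITHM is derived from the cucumber profile in the environment setup further down, so the same feature suite can launch either the CH or the MLD backend.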
@@ -1,4 +1,4 @@
-@prepare @options @version
+@contract @options @version
 Feature: osrm-contract command line options: version
 # the regex will match these two formats:
 #   v0.3.7.0   # this is the normal format when you build from a git clone
features/options/customize/files.feature (new file, 23 lines)
@@ -0,0 +1,23 @@
@customize @options @files
Feature: osrm-contract command line options: files
    Background:
        Given the profile "testbot"
        And the node map
            """
            a b
            """
        And the ways
            | nodes |
            | ab    |
        And the data has been extracted
        And the data has been partitioned

    Scenario: osrm-customize - Passing base file
        When I run "osrm-customize {processed_file}"
        Then it should exit successfully

    Scenario: osrm-customize - Missing input file
        When I try to run "osrm-customize over-the-rainbow.osrm"
        And stderr should contain "over-the-rainbow.osrm"
        And stderr should contain "not found"
        And it should exit with an error
features/options/customize/help.feature (new file, 35 lines)
@@ -0,0 +1,35 @@
@contract @options @help
Feature: osrm-customize command line options: help

    Scenario: osrm-customize - Help should be shown when no options are passed
        When I try to run "osrm-customize"
        Then stderr should be empty
        And stdout should contain /osrm-customize(.exe)? <input.osrm> \[options\]:/
        And stdout should contain "Options:"
        And stdout should contain "--version"
        And stdout should contain "--help"
        And stdout should contain "Configuration:"
        And stdout should contain "--threads"
        And it should exit with an error

    Scenario: osrm-customize - Help, short
        When I run "osrm-customize -h"
        Then stderr should be empty
        And stdout should contain /osrm-customize(.exe)? <input.osrm> \[options\]:/
        And stdout should contain "Options:"
        And stdout should contain "--version"
        And stdout should contain "--help"
        And stdout should contain "Configuration:"
        And stdout should contain "--threads"
        And it should exit successfully

    Scenario: osrm-customize - Help, long
        When I run "osrm-customize --help"
        Then stderr should be empty
        And stdout should contain /osrm-customize(.exe)? <input.osrm> \[options\]:/
        And stdout should contain "Options:"
        And stdout should contain "--version"
        And stdout should contain "--help"
        And stdout should contain "Configuration:"
        And stdout should contain "--threads"
        And it should exit successfully
features/options/customize/invalid.feature (new file, 20 lines)
@@ -0,0 +1,20 @@
@prepare @options @invalid
Feature: osrm-customize command line options: invalid options

    Background:
        Given the profile "testbot"
        And the node map
            """
            a b
            """
        And the ways
            | nodes |
            | ab    |
        And the data has been extracted

    Scenario: osrm-customize - Non-existing option
        When I try to run "osrm-customize --fly-me-to-the-moon"
        Then stdout should be empty
        And stderr should contain "option"
        And stderr should contain "fly-me-to-the-moon"
        And it should exit with an error
features/options/customize/version.feature (new file, 19 lines)
@@ -0,0 +1,19 @@
@prepare @options @version
Feature: osrm-customize command line options: version

    Background:
        Given the profile "testbot"

    Scenario: osrm-customize - Version, short
        When I run "osrm-customize --v"
        Then stderr should be empty
        And stdout should contain 1 line
        And stdout should contain /(v\d{1,2}\.\d{1,2}\.\d{1,2}|\w*-\d+-\w+)/
        And it should exit successfully

    Scenario: osrm-customize - Version, long
        When I run "osrm-customize --version"
        Then stderr should be empty
        And stdout should contain 1 line
        And stdout should contain /(v\d{1,2}\.\d{1,2}\.\d{1,2}|\w*-\d+-\w+)/
        And it should exit successfully
features/options/partition/files.feature (new file, 22 lines)
@@ -0,0 +1,22 @@
@partition @options @files
Feature: osrm-partition command line options: files
    Background:
        Given the profile "testbot"
        And the node map
            """
            a b
            """
        And the ways
            | nodes |
            | ab    |
        And the data has been extracted

    Scenario: osrm-partition - Passing base file
        When I run "osrm-partition {processed_file}"
        Then it should exit successfully

    Scenario: osrm-partition - Missing input file
        When I try to run "osrm-partition over-the-rainbow.osrm"
        And stderr should contain "over-the-rainbow.osrm"
        And stderr should contain "not found"
        And it should exit with an error
features/options/partition/help.feature (new file, 50 lines)
@@ -0,0 +1,50 @@
@partition @options @help
Feature: osrm-partition command line options: help

    Scenario: osrm-partition - Help should be shown when no options are passed
        When I try to run "osrm-partition"
        Then stderr should be empty
        And stdout should contain /osrm-partition(.exe)? <input.osrm> \[options\]:/
        And stdout should contain "Options:"
        And stdout should contain "--version"
        And stdout should contain "--help"
        And stdout should contain "Configuration:"
        And stdout should contain "--threads"
        And stdout should contain "--min-cell-size"
        And stdout should contain "--balance"
        And stdout should contain "--boundary"
        And stdout should contain "--optimizing-cuts"
        And stdout should contain "--small-component-size"
        And it should exit with an error

    Scenario: osrm-partition - Help, short
        When I run "osrm-partition -h"
        Then stderr should be empty
        And stdout should contain /osrm-partition(.exe)? <input.osrm> \[options\]:/
        And stdout should contain "Options:"
        And stdout should contain "--version"
        And stdout should contain "--help"
        And stdout should contain "Configuration:"
        And stdout should contain "--threads"
        And stdout should contain "--min-cell-size"
        And stdout should contain "--balance"
        And stdout should contain "--boundary"
        And stdout should contain "--optimizing-cuts"
        And stdout should contain "--small-component-size"
        And it should exit successfully

    Scenario: osrm-partition - Help, long
        When I run "osrm-partition --help"
        Then stderr should be empty
        And stdout should contain /osrm-partition(.exe)? <input.osrm> \[options\]:/
        And stdout should contain "Options:"
        And stdout should contain "--version"
        And stdout should contain "--help"
        And stdout should contain "Configuration:"
        And stdout should contain "--threads"
        And stdout should contain "--min-cell-size"
        And stdout should contain "--balance"
        And stdout should contain "--boundary"
        And stdout should contain "--optimizing-cuts"
        And stdout should contain "--small-component-size"
        And it should exit successfully
features/options/partition/invalid.feature (new file, 20 lines)
@@ -0,0 +1,20 @@
@partition @options @invalid
Feature: osrm-partition command line options: invalid options

    Background:
        Given the profile "testbot"
        And the node map
            """
            a b
            """
        And the ways
            | nodes |
            | ab    |
        And the data has been extracted

    Scenario: osrm-partition - Non-existing option
        When I try to run "osrm-partition --fly-me-to-the-moon"
        Then stdout should be empty
        And stderr should contain "option"
        And stderr should contain "fly-me-to-the-moon"
        And it should exit with an error
features/options/partition/version.feature (new file, 19 lines)
@@ -0,0 +1,19 @@
@partition @options @version
Feature: osrm-partition command line options: version

    Background:
        Given the profile "testbot"

    Scenario: osrm-partition - Version, short
        When I run "osrm-partition --v"
        Then stderr should be empty
        And stdout should contain 1 line
        And stdout should contain /(v\d{1,2}\.\d{1,2}\.\d{1,2}|\w*-\d+-\w+)/
        And it should exit successfully

    Scenario: osrm-partition - Version, long
        When I run "osrm-partition --version"
        Then stderr should be empty
        And stdout should contain 1 line
        And stdout should contain /(v\d{1,2}\.\d{1,2}\.\d{1,2}|\w*-\d+-\w+)/
        And it should exit successfully
@@ -23,6 +23,16 @@ module.exports = function () {
        callback();
    });

+    this.Given(/^the partition extra arguments "(.*?)"$/, (args, callback) => {
+        this.partitionArgs = this.expandOptions(args);
+        callback();
+    });
+
+    this.Given(/^the customize extra arguments "(.*?)"$/, (args, callback) => {
+        this.customizeArgs = this.expandOptions(args);
+        callback();
+    });
+
    this.Given(/^a grid size of ([0-9.]+) meters$/, (meters, callback) => {
        this.setGridSize(meters);
        callback();
@@ -259,11 +269,7 @@ module.exports = function () {
        this.writeAndLinkOSM(callback);
    });

-    this.Given(/^the data has been extracted$/, (callback) => {
-        this.reprocess(callback);
-    });
-
-    this.Given(/^the data has been contracted$/, (callback) => {
+    this.Given(/^the data has been (extract|contract|partition|customiz)ed$/, (step, callback) => {
        this.reprocess(callback);
    });
@@ -25,32 +25,16 @@ module.exports = function () {
        this.runAndSafeOutput('osrm-routed', options, callback);
    });

-    this.When(/^I run "osrm\-extract\s?(.*?)"$/, (options, callback) => {
-        const stamp = this.processedCacheFile + '.extract';
-        this.runAndSafeOutput('osrm-extract', options, (err) => {
+    this.When(/^I run "osrm\-(extract|contract|partition|customize)\s?(.*?)"$/, (binary, options, callback) => {
+        const stamp = this.processedCacheFile + '.stamp_' + binary;
+        this.runAndSafeOutput('osrm-' + binary, options, (err) => {
            if (err) return callback(err);
            fs.writeFile(stamp, 'ok', callback);
        });
    });

-    this.When(/^I run "osrm\-contract\s?(.*?)"$/, (options, callback) => {
-        const stamp = this.processedCacheFile + '.contract';
-        this.runAndSafeOutput('osrm-contract', options, (err) => {
-            if (err) return callback(err);
-            fs.writeFile(stamp, 'ok', callback);
-        });
-    });
-
    this.When(/^I try to run "osrm\-routed\s?(.*?)"$/, (options, callback) => {
        this.runAndSafeOutput('osrm-routed', options, () => { callback(); });
    });

-    this.When(/^I try to run "osrm\-extract\s?(.*?)"$/, (options, callback) => {
-        this.runAndSafeOutput('osrm-extract', options, () => { callback(); });
-    });
-
-    this.When(/^I try to run "osrm\-contract\s?(.*?)"$/, (options, callback) => {
-        this.runAndSafeOutput('osrm-contract', options, () => { callback(); });
+    this.When(/^I try to run "(osrm\-[a-z]+)\s?(.*?)"$/, (binary, options, callback) => {
+        this.runAndSafeOutput(binary, options, () => { callback(); });
    });

    this.When(/^I run "osrm\-datastore\s?(.*?)"(?: with input "([^"]*)")?$/, (options, input, callback) => {
@@ -68,8 +52,9 @@ module.exports = function () {
        assert.ok(this.exitCode !== 0 || this.termSignal);
    });

-    this.Then(/^stdout should contain "(.*?)"$/, (str) => {
-        assert.ok(this.stdout.indexOf(str) > -1);
+    this.Then(/^stdout should( not)? contain "(.*?)"$/, (not, str) => {
+        const contains = this.stdout.indexOf(str) > -1;
+        assert.ok(typeof not === 'undefined' ? contains : !contains);
    });

    this.Then(/^stderr should( not)? contain "(.*?)"$/, (not, str) => {
@@ -179,7 +179,7 @@ module.exports = function () {
    };

    this.extractData = (p, callback) => {
-        let stamp = p.processedCacheFile + '.extract';
+        let stamp = p.processedCacheFile + '.stamp_extract';
        fs.exists(stamp, (exists) => {
            if (exists) return callback();

@@ -193,7 +193,7 @@ module.exports = function () {
    };

    this.contractData = (p, callback) => {
-        let stamp = p.processedCacheFile + '.contract';
+        let stamp = p.processedCacheFile + '.stamp_contract';
        fs.exists(stamp, (exists) => {
            if (exists) return callback();

@@ -206,15 +206,46 @@ module.exports = function () {
        });
    };

-    this.extractAndContract = (callback) => {
+    this.partitionData = (p, callback) => {
+        let stamp = p.processedCacheFile + '.stamp_partition';
+        fs.exists(stamp, (exists) => {
+            if (exists) return callback();
+
+            this.runBin('osrm-partition', util.format('%s %s', p.partitionArgs, p.processedCacheFile), p.environment, (err) => {
+                if (err) {
+                    return callback(new Error(util.format('osrm-partition %s: %s', errorReason(err), err.cmd)));
+                }
+                fs.writeFile(stamp, 'ok', callback);
+            });
+        });
+    };
+
+    this.customizeData = (p, callback) => {
+        let stamp = p.processedCacheFile + '.stamp_customize';
+        fs.exists(stamp, (exists) => {
+            if (exists) return callback();
+
+            this.runBin('osrm-customize', util.format('%s %s', p.customizeArgs, p.processedCacheFile), p.environment, (err) => {
+                if (err) {
+                    return callback(new Error(util.format('osrm-customize %s: %s', errorReason(err), err)));
+                }
+                fs.writeFile(stamp, 'ok', callback);
+            });
+        });
+    };
+
+    this.extractContractPartitionAndCustomize = (callback) => {
        // a shallow copy of scenario parameters to avoid data inconsistency
        // if a cucumber timeout occurs during deferred jobs
        let p = {extractArgs: this.extractArgs, contractArgs: this.contractArgs,
+                 partitionArgs: this.partitionArgs, customizeArgs: this.customizeArgs,
                 profileFile: this.profileFile, inputCacheFile: this.inputCacheFile,
                 processedCacheFile: this.processedCacheFile, environment: this.environment};
        let queue = d3.queue(1);
        queue.defer(this.extractData.bind(this), p);
        queue.defer(this.contractData.bind(this), p);
+        queue.defer(this.partitionData.bind(this), p);
+        queue.defer(this.customizeData.bind(this), p);
        queue.awaitAll(callback);
    };

@@ -228,14 +259,14 @@ module.exports = function () {
    this.reprocess = (callback) => {
        let queue = d3.queue(1);
        queue.defer(this.writeAndLinkOSM.bind(this));
-        queue.defer(this.extractAndContract.bind(this));
+        queue.defer(this.extractContractPartitionAndCustomize.bind(this));
        queue.awaitAll(callback);
    };

    this.reprocessAndLoadData = (callback) => {
        let queue = d3.queue(1);
        queue.defer(this.writeAndLinkOSM.bind(this));
-        queue.defer(this.extractAndContract.bind(this));
+        queue.defer(this.extractContractPartitionAndCustomize.bind(this));
        queue.defer(this.osrmLoader.load.bind(this.osrmLoader), this.processedCacheFile);
        queue.awaitAll(callback);
    };
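After this change the harness always runs the full toolchain in sequence via d3.queue(1): osrm-extract, then osrm-contract (the CH pipeline), then osrm-partition and osrm-customize (the MLD pipeline). Each stage drops a .stamp_* file next to the processed data so cached scenarios can skip stages that already completed.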
@@ -34,11 +34,12 @@ module.exports = function () {
    this.DEFAULT_LOAD_METHOD = 'datastore';
    this.DEFAULT_ORIGIN = [1,1];
    this.OSM_USER = 'osrm';
    this.OSM_GENERATOR = 'osrm-test';
    this.OSM_UID = 1;
    this.OSM_TIMESTAMP = '2000-01-01T00:00:00Z';
    this.WAY_SPACING = 100;
    this.DEFAULT_GRID_SIZE = 100; // meters
+    // get algorithm name from the command line profile argument
+    this.ROUTING_ALGORITHM = process.argv[process.argv.indexOf('-p') + 1] === 'mld' ? 'MLD' : 'CH';

    this.OSRM_PORT = process.env.OSRM_PORT && parseInt(process.env.OSRM_PORT) || 5000;
    this.HOST = 'http://127.0.0.1:' + this.OSRM_PORT;
@@ -35,6 +35,8 @@ module.exports = function () {
        this.queryParams = {};
        this.extractArgs = '';
        this.contractArgs = '';
+        this.partitionArgs = '';
+        this.customizeArgs = '';
        this.environment = Object.assign(this.DEFAULT_ENVIRONMENT);
        this.resetOSM();
@@ -46,6 +46,10 @@ module.exports = function () {
        let child = child_process.execFile(cmd, opts, {maxBuffer: 1024 * 1024 * 1000, env: env}, callback);
        child.on('exit', function(code) {
            log.write(util.format('*** %s exited with code %d\n', bin, code));
            // remove listeners and close log file -> some tail messages can be lost
            child.stdout.removeListener('data', child.logFunc);
            child.stderr.removeListener('data', child.logFunc);
            log.end();
        }.bind(this));
        this.setupOutputLog(child, log);
        return child;
@@ -4,6 +4,7 @@ Feature: Avoid weird loops caused by rounding errors
    Background:
        Given the profile "testbot"

+    @via
    Scenario: Weird sidestreet loops
        Given the node map
            """
@@ -72,7 +73,7 @@ Feature: Avoid weird loops caused by rounding errors
            | from | to | route   |
            | x    | y  | abc,abc |

-    @412
+    @412 @via
    Scenario: Avoid weird loops 3
        And the node map
            """
@@ -139,6 +139,7 @@ Feature: Basic Map Matching
            """

        And the contract extra arguments "--segment-speed-file {speeds_file}"
+        And the customize extra arguments "--segment-speed-file {speeds_file}"

        When I match I should get
            | trace | matchings | a:duration |
@@ -165,6 +166,7 @@ Feature: Basic Map Matching
            """

        And the contract extra arguments "--segment-speed-file {speeds_file}"
+        And the customize extra arguments "--segment-speed-file {speeds_file}"

        When I match I should get
            | trace | matchings | a:duration |
features/testbot/multi_level_routing.feature (new file, 131 lines)
@@ -0,0 +1,131 @@
@routing @testbot @mld
Feature: Multi level routing

    Background:
        Given the profile "testbot"
        And the partition extra arguments "--min-cell-size 4 --small-component-size 1"

    Scenario: Testbot - Multi level routing check partition
        Given the node map
            """
            a───b───e───f
            │   │   │   │
            d───c   h───g
                 ╲ ╱
                  ╳
                 ╱ ╲
            i───j   m───n
            │   │   │   │
            l───k───p───o
            """

        And the ways
            | nodes | highway |
            | abcda | primary |
            | efghe | primary |
            | ijkli | primary |
            | nmop  | primary |
            | cm    | primary |
            | hj    | primary |
            | kp    | primary |
            | be    | primary |

        And the data has been extracted
        When I run "osrm-partition --min-cell-size 4 --small-component-size 1 {processed_file}"
        Then it should exit successfully
        And stdout should not contain "level 1 #cells 1 bit size 1"

    Scenario: Testbot - Multi level routing
        Given the node map
            """
            a───b   e───f
            │   │   │   │
            d───c   h───g
                 ╲ ╱
                  ╳
                 ╱ ╲
            i───j   m───n
            │   │   │   │
            l───k───p───o
            """
        And the ways
            | nodes | highway |
            | abcda | primary |
            | efghe | primary |
            | ijkli | primary |
            | nmop  | primary |
            | cm    | primary |
            | hj    | primary |
            | kp    | primary |
        And the partition extra arguments "--min-cell-size 4 --small-component-size 1"

        When I route I should get
            | from | to | route                                 | time   |
            | a    | b  | abcda,abcda                           | 20s    |
            | a    | f  | abcda,cm,nmop,kp,ijkli,hj,efghe,efghe | 257.7s |
            | c    | m  | cm,cm                                 | 44.7s  |

    Scenario: Testbot - Multi level routing: horizontal road
        Given the node map
            """
            a───b   e───f
            │   │   │   │
            d───c   h───g
            │           │
            i═══j═══k═══l
            │           │
            m───n   q───r
            │   │   │   │
            p───o───t───s
            """
        And the ways
            | nodes | highway   |
            | abcda | primary   |
            | efghe | primary   |
            | mnopm | primary   |
            | qrstq | primary   |
            | ijkl  | primary   |
            | dim   | primary   |
            | glr   | primary   |
            | ot    | secondary |
        And the partition extra arguments "--min-cell-size 4 --small-component-size 1"

        When I route I should get
            | from | to | route                          | time |
            | a    | b  | abcda,abcda                    | 20s  |
            | a    | d  | abcda,abcda                    | 20s  |
            | a    | l  | abcda,dim,ijkl,ijkl            | 100s |
            | a    | p  | abcda,dim,mnopm,mnopm          | 80s  |
            | a    | o  | abcda,dim,mnopm,mnopm          | 100s |
            | a    | t  | abcda,dim,mnopm,ot,ot          | 140s |
            | a    | s  | abcda,dim,ijkl,glr,qrstq,qrstq | 140s |
            | a    | f  | abcda,dim,ijkl,glr,efghe,efghe | 140s |


    Scenario: Testbot - Multi level routing: route over internal cell edge hf
        Given the node map
            """
                a───b
                │   │
            d───c──e───f
             ╲  │ ╳ │ ╲
            h───g──i───j
                │   │
                l───k
            """
        And the ways
            | nodes | maxspeed |
            | abcda | 5        |
            | efghe | 5        |
            | ijkli | 5        |
            | eg    | 10       |
            | ce    | 15       |
            | ch    | 15       |
            | fi    | 15       |
            | gi    | 15       |
            | hf    | 100      |
        And the partition extra arguments "--min-cell-size 4 --small-component-size 1"

        When I route I should get
            | from | to | route                      | time   |
            | a    | k  | abcda,ch,hf,fi,ijkli,ijkli | 724.3s |
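Note how these scenarios force a non-trivial partition: with --min-cell-size 4 and --small-component-size 1 on such tiny grids, the partitioner must produce more than one cell per level — the first scenario verifies this by asserting the log does not report a single-cell level ("level 1 #cells 1 bit size 1") — so the routes in the later scenarios genuinely cross cell boundaries.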
@@ -27,7 +27,8 @@ Feature: Traffic - speeds

    Scenario: Weighting based on speed file
        Given the contract extra arguments "--segment-speed-file {speeds_file}"
-        Given the speed file
+        And the customize extra arguments "--segment-speed-file {speeds_file}"
+        And the speed file
            """
            1,2,0
            2,1,0
@@ -52,7 +53,8 @@ Feature: Traffic - speeds

    Scenario: Weighting based on speed file weights, ETA based on file durations
        Given the contract extra arguments "--segment-speed-file {speeds_file}"
-        Given the speed file
+        And the customize extra arguments "--segment-speed-file {speeds_file}"
+        And the speed file
            """
            1,2,1,20020.7
            2,1,1,20020.7
@@ -84,7 +86,8 @@ Feature: Traffic - speeds
            properties.weight_precision = 3
            """
        And the contract extra arguments "--segment-speed-file {speeds_file}"
-        Given the speed file
+        And the customize extra arguments "--segment-speed-file {speeds_file}"
+        And the speed file
            """
            1,2,1,20020.789
            2,1,1,20020.123
@@ -110,10 +113,11 @@ Feature: Traffic - speeds

    Scenario: Speeds that isolate a single node (a)
        Given the contract extra arguments "--segment-speed-file {speeds_file}"
+        And the customize extra arguments "--segment-speed-file {speeds_file}"
        And the node locations
            | node | lat   | lon  |
            | h    | 2.075 | 19.1 |
-        Given the speed file
+        And the speed file
            """
            1,2,0
            2,1,0
@@ -40,6 +40,8 @@ Feature: Traffic - turn penalties applied to turn onto which a phantom node snap
            3,4,7,-30
            """
+        And the contract extra arguments "--turn-penalty-file {penalties_file}"
+        And the customize extra arguments "--turn-penalty-file {penalties_file}"

        When I route I should get
            | from | to | route    | speed   | time    |
            | a    | e  | ab,be,be | 36 km/h | 40s +-1 |
@@ -56,6 +58,8 @@ Feature: Traffic - turn penalties applied to turn onto which a phantom node snap
            3,4,7,-30,100.75
            """
+        And the contract extra arguments "--turn-penalty-file {penalties_file}"
+        And the customize extra arguments "--turn-penalty-file {penalties_file}"

        When I route I should get
            | from | to | route    | speed   | time    | weights   |
            | a    | e  | ab,be,be | 36 km/h | 40s +-1 | 16.7,20,0 |
@@ -240,6 +240,7 @@ Feature: Weight tests
            | e,d       | ,,    | 40m +-.1 | 4.009,1.11,0 | 189.9s,100s,0s |
            | d,e       | ,,    | 40m +-.1 | 2.21,1.11,0  | 10.1s,100s,0s  |

+    @traffic @speed
    Scenario: Step weights -- segment_function with speed and turn updates
        Given the profile file "testbot" extended with
            """
@@ -279,6 +280,7 @@ Feature: Weight tests
            2,3,5,25.5,16.7
            """
        And the contract extra arguments "--segment-speed-file {speeds_file} --turn-penalty-file {penalties_file}"
+        And the customize extra arguments "--segment-speed-file {speeds_file} --turn-penalty-file {penalties_file}"

        When I route I should get
            | waypoints | route | distance | weights | times |
@@ -286,6 +288,7 @@ Feature: Weight tests
            | a,e       | ,,    | 60.1m    | 68.7,10,0 | 38.5s,11s,0s |
            | d,e       | ,,    | 39.9m    | 10,10,0   | 11s,11s,0s   |

+    @traffic @speed
    Scenario: Step weights -- segment_function with speed and turn updates with fallback to durations
        Given the profile file "testbot" extended with
            """
@@ -313,6 +316,7 @@ Feature: Weight tests
            2,3,5,1
            """
        And the contract extra arguments "--segment-speed-file {speeds_file} --turn-penalty-file {penalties_file}"
+        And the customize extra arguments "--segment-speed-file {speeds_file} --turn-penalty-file {penalties_file}"

        When I route I should get
            | waypoints | route | distance | weights | times |
@@ -139,13 +139,13 @@ class GraphContractor
    if (!data.is_original_via_node_ID && !orig_node_id_from_new_node_id_map.empty())
    {
        // translate the _node id_ of the shortcutted node
-        new_edge.data.id = orig_node_id_from_new_node_id_map[data.id];
+        new_edge.data.turn_id = orig_node_id_from_new_node_id_map[data.id];
    }
    else
    {
-        new_edge.data.id = data.id;
+        new_edge.data.turn_id = data.id;
    }
-    BOOST_ASSERT_MSG(new_edge.data.id != INT_MAX, // 2^31
+    BOOST_ASSERT_MSG(new_edge.data.turn_id != INT_MAX, // 2^31
                     "edge id invalid");
    new_edge.data.forward = data.forward;
    new_edge.data.backward = data.backward;
@@ -38,7 +38,7 @@ std::vector<ContractorEdge> adaptToContractorInput(InputEdgeContainer input_edge
                          std::max(input_edge.data.weight, 1),
                          input_edge.data.duration,
                          1,
-                         input_edge.data.edge_id,
+                         input_edge.data.turn_id,
                          false,
                          input_edge.data.forward ? true : false,
                          input_edge.data.backward ? true : false);
@@ -48,7 +48,7 @@ std::vector<ContractorEdge> adaptToContractorInput(InputEdgeContainer input_edge
                          std::max(input_edge.data.weight, 1),
                          input_edge.data.duration,
                          1,
-                         input_edge.data.edge_id,
+                         input_edge.data.turn_id,
                          false,
                          input_edge.data.backward ? true : false,
                          input_edge.data.forward ? true : false);
@@ -17,7 +17,7 @@ struct QueryEdge
    struct EdgeData
    {
        explicit EdgeData()
-            : id(0), shortcut(false), weight(0), duration(0), forward(false), backward(false)
+            : turn_id(0), shortcut(false), weight(0), duration(0), forward(false), backward(false)
        {
        }

@@ -26,14 +26,14 @@ struct QueryEdge
        weight = other.weight;
        duration = other.duration;
        shortcut = other.shortcut;
-        id = other.id;
+        turn_id = other.turn_id;
        forward = other.forward;
        backward = other.backward;
    }
    // this ID is either the middle node of the shortcut, or the ID of the edge based node (node
    // based edge) storing the appropriate data. If `shortcut` is set to true, we get the middle
    // node. Otherwise we use the edge based node to access node data.
-    NodeID id : 31;
+    NodeID turn_id : 31;
    bool shortcut : 1;
    EdgeWeight weight;
    EdgeWeight duration : 30;
@@ -58,7 +58,7 @@ struct QueryEdge
        return (source == right.source && target == right.target &&
                data.weight == right.data.weight && data.duration == right.data.duration &&
                data.shortcut == right.data.shortcut && data.forward == right.data.forward &&
-                data.backward == right.data.backward && data.id == right.data.id);
+                data.backward == right.data.backward && data.turn_id == right.data.turn_id);
    }
};
}
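The `turn_id : 31` plus `shortcut : 1` declaration packs the discriminator bit and the ID into a single 32-bit word. A small stand-alone illustration of the same trick — note that bitfield packing is implementation-defined in general; this layout is what mainstream ABIs produce:

    // Sketch of the storage trick used by QueryEdge::EdgeData (illustrative only).
    #include <cstdint>
    #include <cstdio>

    struct PackedEdgeData
    {
        std::uint32_t turn_id : 31; // middle node of a shortcut, or edge-based node ID
        std::uint32_t shortcut : 1; // discriminates the two interpretations above
    };

    int main()
    {
        static_assert(sizeof(PackedEdgeData) == 4, "both fields share one 32-bit word");
        PackedEdgeData d{123456u, 1u};
        std::printf("turn_id=%u shortcut=%u\n", (unsigned)d.turn_id, (unsigned)d.shortcut);
    }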
@@ -100,7 +100,7 @@ template <> struct HasManyToManySearch<algorithm::MLD> final : std::false_type
template <> struct HasShortestPathSearch<algorithm::MLD> final : std::false_type
{
};
-template <> struct HasDirectShortestPathSearch<algorithm::MLD> final : std::false_type
+template <> struct HasDirectShortestPathSearch<algorithm::MLD> final : std::true_type
{
};
template <> struct HasMapMatching<algorithm::MLD> final : std::false_type
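Flipping HasDirectShortestPathSearch<algorithm::MLD> to std::true_type is what unlocks the new code path. A compact sketch of how such a true_type/false_type trait can gate dispatch at compile time — hypothetical names using C++17 if constexpr, not OSRM's actual plumbing:

    #include <cstdio>
    #include <stdexcept>
    #include <type_traits>

    struct CH {};
    struct MLD {};

    template <typename Algorithm> struct HasDirectShortestPathSearch : std::false_type {};
    template <> struct HasDirectShortestPathSearch<CH> : std::true_type {};
    template <> struct HasDirectShortestPathSearch<MLD> : std::true_type {}; // flipped by this PR

    template <typename Algorithm>
    int directShortestPath(int from, int to)
    {
        if constexpr (HasDirectShortestPathSearch<Algorithm>::value)
            return to - from; // placeholder for the real search
        else
            throw std::runtime_error("DirectShortestPathSearch is not implemented");
    }

    int main()
    {
        std::printf("%d\n", directShortestPath<MLD>(2, 7)); // compiles and runs: 5
    }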
@@ -89,6 +89,9 @@ template <> class AlgorithmDataFacade<algorithm::MLD>
    virtual const partition::MultiLevelPartitionView &GetMultiLevelPartition() const = 0;

+    virtual const partition::CellStorageView &GetCellStorage() const = 0;
+
+    // searches for a specific edge
+    virtual EdgeID FindEdge(const NodeID from, const NodeID to) const = 0;
};
}
}
@@ -939,10 +939,6 @@ class ContiguousInternalMemoryAlgorithmDataFacade<algorithm::MLD>

        if (data_layout.GetBlockSize(storage::DataLayout::MLD_CELL_WEIGHTS) > 0)
        {
-            BOOST_ASSERT(data_layout.GetBlockSize(storage::DataLayout::MLD_CELL_SOURCE_BOUNDARY) >
-                         0);
-            BOOST_ASSERT(
-                data_layout.GetBlockSize(storage::DataLayout::MLD_CELL_DESTINATION_BOUNDARY) > 0);
            BOOST_ASSERT(data_layout.GetBlockSize(storage::DataLayout::MLD_CELLS) > 0);
            BOOST_ASSERT(data_layout.GetBlockSize(storage::DataLayout::MLD_CELL_LEVEL_OFFSETS) > 0);

@@ -1071,6 +1067,12 @@ class ContiguousInternalMemoryDataFacade<algorithm::MLD>
    {
        return m_query_graph->GetAdjacentEdgeRange(node);
    }

+    // searches for a specific edge
+    EdgeID FindEdge(const NodeID from, const NodeID to) const override final
+    {
+        return m_query_graph->FindEdge(from, to);
+    }
};
}
}
@@ -209,13 +209,22 @@ inline std::vector<RouteStep> assembleSteps(const datafacade::BaseDataFacade &fa
    else
    {
        BOOST_ASSERT(source_node.fwd_segment_position == target_node.fwd_segment_position);
-        //     s              t
-        // u-------------v
-        // |---|              source_duration
-        // |---------|        target_duration
        BOOST_ASSERT(source_traversed_in_reverse == target_traversed_in_reverse);

-        const EdgeWeight duration = target_duration - source_duration;
+        // The difference (target-source) should handle
+        // all variants for similar directions u-v and s-t (and opposite)
+        //     s(t)     t(s)   source_traversed_in_reverse = target_traversed_in_reverse = false
+        // u-------------v
+        // |---|               source_weight
+        // |---------|         target_weight
+
+        //     s(t)     t(s)   source_traversed_in_reverse = target_traversed_in_reverse = true
+        // u-------------v
+        // |    |---------|    source_weight
+        // |    |---|          target_weight
+        const EdgeWeight weight = target_weight - source_weight;
+        const EdgeWeight duration = target_duration - source_duration;
+        BOOST_ASSERT(weight >= 0);
+        BOOST_ASSERT(duration >= 0);

        steps.push_back(RouteStep{source_node.name_id,
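A quick arithmetic check of the target-minus-source rule above, with invented offsets (a segment u->v of weight 12, phantoms 3 and 9 weight units from u; in the reverse case both offsets are measured from v instead):

    // Invented numbers; only illustrates why the difference stays non-negative.
    static_assert(9 - 3 == 6, "forward: target_weight - source_weight >= 0");
    static_assert((12 - 3) - (12 - 9) == 6, "reverse: offsets measured from v, same result");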
@@ -193,13 +193,6 @@ RoutingAlgorithms<algorithm::MLD>::ShortestPathSearch(const std::vector<PhantomN
    throw util::exception("ShortestPathSearch is not implemented");
}

-template <>
-InternalRouteResult inline RoutingAlgorithms<algorithm::MLD>::DirectShortestPathSearch(
-    const PhantomNodes &) const
-{
-    throw util::exception("DirectShortestPathSearch is not implemented");
-}
-
template <>
inline std::vector<EdgeWeight>
RoutingAlgorithms<algorithm::MLD>::ManyToManySearch(const std::vector<PhantomNode> &,
@@ -21,15 +21,11 @@ namespace routing_algorithms
/// by the previous route.
/// This variation is only an optimization for graphs with slow queries, for example
/// not fully contracted graphs.
-InternalRouteResult directShortestPathSearch(
-    SearchEngineData &engine_working_data,
-    const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
-    const PhantomNodes &phantom_nodes);
-
-InternalRouteResult directShortestPathSearch(
-    SearchEngineData &engine_working_data,
-    const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CoreCH> &facade,
-    const PhantomNodes &phantom_nodes);
+template <typename AlgorithmT>
+InternalRouteResult
+directShortestPathSearch(SearchEngineData &engine_working_data,
+                         const datafacade::ContiguousInternalMemoryDataFacade<AlgorithmT> &facade,
+                         const PhantomNodes &phantom_nodes);

} // namespace routing_algorithms
} // namespace engine
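One template declaration now serves CH, CoreCH and MLD instead of two hand-written overloads. A self-contained sketch of the design choice, with illustrative stand-in types rather than the OSRM facades:

    #include <cstdio>

    struct CHFacade  { static const char *name() { return "CH"; } };
    struct MLDFacade { static const char *name() { return "MLD"; } };

    // One definition (instantiated per facade type) replaces N near-identical
    // overloads; adding a backend no longer touches this interface.
    template <typename FacadeT>
    void directShortestPathSearch(const FacadeT &)
    {
        std::printf("searching on %s\n", FacadeT::name());
    }

    int main()
    {
        directShortestPathSearch(CHFacade{});
        directShortestPathSearch(MLDFacade{});
    }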
@@ -1,5 +1,5 @@
-#ifndef ROUTING_BASE_HPP
-#define ROUTING_BASE_HPP
+#ifndef OSRM_ENGINE_ROUTING_BASE_HPP
+#define OSRM_ENGINE_ROUTING_BASE_HPP

#include "extractor/guidance/turn_instruction.hpp"
@@ -35,382 +35,150 @@ namespace routing_algorithms
 {
 static constexpr bool FORWARD_DIRECTION = true;
 static constexpr bool REVERSE_DIRECTION = false;
+static constexpr bool DO_NOT_FORCE_LOOPS = false;

-// Stalling
-template <bool DIRECTION, typename HeapT>
-bool stallAtNode(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
-                 const NodeID node,
-                 const EdgeWeight weight,
-                 const HeapT &query_heap)
+template <bool DIRECTION, typename Heap>
+void insertNodesInHeap(Heap &heap, const PhantomNode &phantom_node)
 {
-    for (auto edge : facade.GetAdjacentEdgeRange(node))
+    BOOST_ASSERT(phantom_node.IsValid());
+
+    const auto weight_sign = DIRECTION == FORWARD_DIRECTION ? -1 : 1;
+    if (phantom_node.forward_segment_id.enabled)
     {
-        const auto &data = facade.GetEdgeData(edge);
-        if (DIRECTION == REVERSE_DIRECTION ? data.forward : data.backward)
-        {
-            const NodeID to = facade.GetTarget(edge);
-            const EdgeWeight edge_weight = data.weight;
-            BOOST_ASSERT_MSG(edge_weight > 0, "edge_weight invalid");
-            if (query_heap.WasInserted(to))
-            {
-                if (query_heap.GetKey(to) + edge_weight < weight)
-                {
-                    return true;
-                }
-            }
-        }
+        heap.Insert(phantom_node.forward_segment_id.id,
+                    weight_sign * phantom_node.GetForwardWeightPlusOffset(),
+                    phantom_node.forward_segment_id.id);
     }
-    return false;
+    if (phantom_node.reverse_segment_id.enabled)
+    {
+        heap.Insert(phantom_node.reverse_segment_id.id,
+                    weight_sign * phantom_node.GetReverseWeightPlusOffset(),
+                    phantom_node.reverse_segment_id.id);
+    }
 }

 template <bool DIRECTION>
-void relaxOutgoingEdges(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
-                        const NodeID node,
-                        const EdgeWeight weight,
-                        SearchEngineData::QueryHeap &heap)
+void insertNodesInHeap(SearchEngineData::ManyToManyQueryHeap &heap, const PhantomNode &phantom_node)
 {
-    for (const auto edge : facade.GetAdjacentEdgeRange(node))
+    BOOST_ASSERT(phantom_node.IsValid());
+
+    const auto weight_sign = DIRECTION == FORWARD_DIRECTION ? -1 : 1;
+    if (phantom_node.forward_segment_id.enabled)
     {
-        const auto &data = facade.GetEdgeData(edge);
-        if (DIRECTION == FORWARD_DIRECTION ? data.forward : data.backward)
-        {
-            const NodeID to = facade.GetTarget(edge);
-            const EdgeWeight edge_weight = data.weight;
-
-            BOOST_ASSERT_MSG(edge_weight > 0, "edge_weight invalid");
-            const EdgeWeight to_weight = weight + edge_weight;
-
-            // New Node discovered -> Add to Heap + Node Info Storage
-            if (!heap.WasInserted(to))
-            {
-                heap.Insert(to, to_weight, node);
-            }
-            // Found a shorter Path -> Update weight
-            else if (to_weight < heap.GetKey(to))
-            {
-                // new parent
-                heap.GetData(to).parent = node;
-                heap.DecreaseKey(to, to_weight);
-            }
-        }
+        heap.Insert(
+            phantom_node.forward_segment_id.id,
+            weight_sign * phantom_node.GetForwardWeightPlusOffset(),
+            {phantom_node.forward_segment_id.id, weight_sign * phantom_node.GetForwardDuration()});
     }
+    if (phantom_node.reverse_segment_id.enabled)
+    {
+        heap.Insert(
+            phantom_node.reverse_segment_id.id,
+            weight_sign * phantom_node.GetReverseWeightPlusOffset(),
+            {phantom_node.reverse_segment_id.id, weight_sign * phantom_node.GetReverseDuration()});
+    }
 }
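A note on the weight_sign convention visible above: the forward heap is seeded with the negated GetForwardWeightPlusOffset() so that when the two searches meet, the sum of the heap keys already equals the phantom-to-phantom weight. A tiny self-contained check; all constants here are invented for illustration:

    #include <cassert>

    int main()
    {
        const int d_source_to_middle = 5; // graph distance: source node -> meeting node
        const int d_middle_to_target = 4; // graph distance: meeting node -> target node
        const int source_offset = 3;      // offset at the source phantom
        const int target_offset = 7;      // offset at the target phantom

        // forward heap seeded with -offset (weight_sign = -1), reverse heap with +offset
        const int forward_key = -source_offset + d_source_to_middle; // 2
        const int reverse_key = target_offset + d_middle_to_target;  // 11

        // phantom-to-phantom weight: node-to-node distance, minus the part of the
        // first segment before the source phantom, plus the part of the last
        // segment up to the target phantom
        const int expected = (d_source_to_middle + d_middle_to_target) - source_offset + target_offset; // 13
        assert(forward_key + reverse_key == expected);
    }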
-/*
-min_edge_offset is needed in case we use multiple
-nodes as start/target nodes with different (even negative) offsets.
-In that case the termination criterion is not correct
-anymore.
-
-Example:
-forward heap: a(-100), b(0),
-reverse heap: c(0), d(100)
-
-a --- d
-  \ /
-  / \
-b --- c
-
-This is equivalent to running a bi-directional Dijkstra on the following graph:
-
-    a --- d
-   /  \ /  \
-  y    x    z
-   \  / \  /
-    b --- c
-
-The graph is constructed by inserting nodes y and z that are connected to the initial nodes
-using edges (y, a) with weight -100, (y, b) with weight 0 and,
-(d, z) with weight 100, (c, z) with weight 0 correspondingly.
-Since we are dealing with a graph that contains _negative_ edges,
-we need to add an offset to the termination criterion.
-*/
-static constexpr bool ENABLE_STALLING = true;
-static constexpr bool DISABLE_STALLING = false;
-static constexpr bool DO_NOT_FORCE_LOOPS = false;
-template <bool DIRECTION, bool STALLING = ENABLE_STALLING>
-void routingStep(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
-                 SearchEngineData::QueryHeap &forward_heap,
-                 SearchEngineData::QueryHeap &reverse_heap,
-                 NodeID &middle_node_id,
-                 EdgeWeight &upper_bound,
-                 EdgeWeight min_edge_offset,
-                 const bool force_loop_forward,
-                 const bool force_loop_reverse)
+template <typename Heap>
+void insertNodesInHeaps(Heap &forward_heap, Heap &reverse_heap, const PhantomNodes &nodes)
 {
-    const NodeID node = forward_heap.DeleteMin();
-    const EdgeWeight weight = forward_heap.GetKey(node);
-
-    if (reverse_heap.WasInserted(node))
-    {
-        const EdgeWeight new_weight = reverse_heap.GetKey(node) + weight;
-        if (new_weight < upper_bound)
-        {
-            // if loops are forced, they are so at the source
-            if ((force_loop_forward && forward_heap.GetData(node).parent == node) ||
-                (force_loop_reverse && reverse_heap.GetData(node).parent == node) ||
-                // in this case we are looking at a bi-directional way where the source
-                // and target phantom are on the same edge based node
-                new_weight < 0)
-            {
-                // check whether there is a loop present at the node
-                for (const auto edge : facade.GetAdjacentEdgeRange(node))
-                {
-                    const auto &data = facade.GetEdgeData(edge);
-                    if (DIRECTION == FORWARD_DIRECTION ? data.forward : data.backward)
-                    {
-                        const NodeID to = facade.GetTarget(edge);
-                        if (to == node)
-                        {
-                            const EdgeWeight edge_weight = data.weight;
-                            const EdgeWeight loop_weight = new_weight + edge_weight;
-                            if (loop_weight >= 0 && loop_weight < upper_bound)
-                            {
-                                middle_node_id = node;
-                                upper_bound = loop_weight;
-                            }
-                        }
-                    }
-                }
-            }
-            else
-            {
-                BOOST_ASSERT(new_weight >= 0);
-
-                middle_node_id = node;
-                upper_bound = new_weight;
-            }
-        }
-    }
-
-    // make sure we don't terminate too early if we initialize the weight
-    // for the nodes in the forward heap with the forward/reverse offset
-    BOOST_ASSERT(min_edge_offset <= 0);
-    if (weight + min_edge_offset > upper_bound)
-    {
-        forward_heap.DeleteAll();
-        return;
-    }
-
-    // Stalling
-    if (STALLING && stallAtNode<DIRECTION>(facade, node, weight, forward_heap))
-    {
-        return;
-    }
-
-    relaxOutgoingEdges<DIRECTION>(facade, node, weight, forward_heap);
+    insertNodesInHeap<FORWARD_DIRECTION>(forward_heap, nodes.source_phantom);
+    insertNodesInHeap<REVERSE_DIRECTION>(reverse_heap, nodes.target_phantom);
 }
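To make the removed routingStep's offset-adjusted stop condition concrete, here is an invented numeric reading of `weight + min_edge_offset > upper_bound` (the constants are assumptions for illustration, echoing the a(-100)/b(0) example in the comment above):

    // Forward heap seeded with a(-100) and b(0)  =>  min_edge_offset = -100.
    constexpr int weight = 250, upper_bound = 200, min_edge_offset = -100;
    static_assert(weight > upper_bound, "a naive check would already stop here");
    static_assert(!(weight + min_edge_offset > upper_bound),
                  "adjusted check keeps searching: a path through the -100 seed "
                  "could still cost 250 - 100 = 150 < 200");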
-template <bool UseDuration>
-EdgeWeight
-getLoopWeight(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
-              NodeID node)
-{
-    EdgeWeight loop_weight = UseDuration ? MAXIMAL_EDGE_DURATION : INVALID_EDGE_WEIGHT;
-    for (auto edge : facade.GetAdjacentEdgeRange(node))
-    {
-        const auto &data = facade.GetEdgeData(edge);
-        if (data.forward)
-        {
-            const NodeID to = facade.GetTarget(edge);
-            if (to == node)
-            {
-                const auto value = UseDuration ? data.duration : data.weight;
-                loop_weight = std::min(loop_weight, value);
-            }
-        }
-    }
-    return loop_weight;
-}
+template <typename FacadeT>
+void annotatePath(const FacadeT &facade,
+                  const NodeID source_node,
+                  const NodeID target_node,
+                  const std::vector<EdgeID> &unpacked_edges,
+                  const PhantomNodes &phantom_node_pair,
+                  std::vector<PathData> &unpacked_path)
+{
+    BOOST_ASSERT(source_node != SPECIAL_NODEID && target_node != SPECIAL_NODEID);
+    BOOST_ASSERT(!unpacked_edges.empty() || source_node == target_node);
+
+    const bool start_traversed_in_reverse =
+        phantom_node_pair.source_phantom.forward_segment_id.id != source_node;
+    const bool target_traversed_in_reverse =
+        phantom_node_pair.target_phantom.forward_segment_id.id != target_node;
+
+    BOOST_ASSERT(phantom_node_pair.source_phantom.forward_segment_id.id == source_node ||
+                 phantom_node_pair.source_phantom.reverse_segment_id.id == source_node);
+    BOOST_ASSERT(phantom_node_pair.target_phantom.forward_segment_id.id == target_node ||
+                 phantom_node_pair.target_phantom.reverse_segment_id.id == target_node);
+
+    for (auto edge_id : unpacked_edges)
+    {
+        const auto &edge_data = facade.GetEdgeData(edge_id);
+        const auto turn_id = edge_data.turn_id; // edge-based node ID
+        const auto name_index = facade.GetNameIndexFromEdgeID(turn_id);
+        const auto turn_instruction = facade.GetTurnInstructionForEdgeID(turn_id);
+        const extractor::TravelMode travel_mode =
+            (unpacked_path.empty() && start_traversed_in_reverse)
+                ? phantom_node_pair.source_phantom.backward_travel_mode
+                : facade.GetTravelModeForEdgeID(turn_id);
+
+        const auto geometry_index = facade.GetGeometryIndexForEdgeID(turn_id);
+        std::vector<NodeID> id_vector;
+
+        std::vector<EdgeWeight> weight_vector;
+        std::vector<EdgeWeight> duration_vector;
+        std::vector<DatasourceID> datasource_vector;
+        if (geometry_index.forward)
+        {
+            id_vector = facade.GetUncompressedForwardGeometry(geometry_index.id);
+            weight_vector = facade.GetUncompressedForwardWeights(geometry_index.id);
+            duration_vector = facade.GetUncompressedForwardDurations(geometry_index.id);
+            datasource_vector = facade.GetUncompressedForwardDatasources(geometry_index.id);
+        }
+        else
+        {
+            id_vector = facade.GetUncompressedReverseGeometry(geometry_index.id);
+            weight_vector = facade.GetUncompressedReverseWeights(geometry_index.id);
+            duration_vector = facade.GetUncompressedReverseDurations(geometry_index.id);
+            datasource_vector = facade.GetUncompressedReverseDatasources(geometry_index.id);
+        }
+        BOOST_ASSERT(id_vector.size() > 0);
+        BOOST_ASSERT(datasource_vector.size() > 0);
+        BOOST_ASSERT(weight_vector.size() == id_vector.size() - 1);
+        BOOST_ASSERT(duration_vector.size() == id_vector.size() - 1);
+        const bool is_first_segment = unpacked_path.empty();
+
+        const std::size_t start_index =
+            (is_first_segment ? ((start_traversed_in_reverse)
+                                     ? weight_vector.size() -
+                                           phantom_node_pair.source_phantom.fwd_segment_position - 1
+                                     : phantom_node_pair.source_phantom.fwd_segment_position)
+                              : 0);
+        const std::size_t end_index = weight_vector.size();
+
+        BOOST_ASSERT(start_index >= 0);
+        BOOST_ASSERT(start_index < end_index);
+        for (std::size_t segment_idx = start_index; segment_idx < end_index; ++segment_idx)
+        {
+            unpacked_path.push_back(PathData{id_vector[segment_idx + 1],
+                                             name_index,
+                                             weight_vector[segment_idx],
+                                             duration_vector[segment_idx],
+                                             extractor::guidance::TurnInstruction::NO_TURN(),
+                                             {{0, INVALID_LANEID}, INVALID_LANE_DESCRIPTIONID},
+                                             travel_mode,
+                                             INVALID_ENTRY_CLASSID,
+                                             datasource_vector[segment_idx],
+                                             util::guidance::TurnBearing(0),
+                                             util::guidance::TurnBearing(0)});
+        }
+        BOOST_ASSERT(unpacked_path.size() > 0);
+        if (facade.hasLaneData(turn_id))
+            unpacked_path.back().lane_data = facade.GetLaneData(turn_id);
+
+        unpacked_path.back().entry_classid = facade.GetEntryClassID(turn_id);
+        unpacked_path.back().turn_instruction = turn_instruction;
+        unpacked_path.back().duration_until_turn += facade.GetDurationPenaltyForEdgeID(turn_id);
+        unpacked_path.back().weight_until_turn += facade.GetWeightPenaltyForEdgeID(turn_id);
+        unpacked_path.back().pre_turn_bearing = facade.PreTurnBearing(turn_id);
+        unpacked_path.back().post_turn_bearing = facade.PostTurnBearing(turn_id);
+    }
+}

-/**
- * Given a sequence of connected `NodeID`s in the CH graph, performs a depth-first unpacking of
- * the shortcut edges. For every "original" edge found, it calls the `callback` with the two
- * NodeIDs for the edge, and the EdgeData for that edge.
- *
- * The primary purpose of this unpacking is to expand a path through the CH into the original
- * route through the pre-contracted graph.
- *
- * Because of the depth-first-search, the `callback` will effectively be called in sequence for
- * the original route from beginning to end.
- *
- * @param packed_path_begin iterator pointing to the start of the NodeID list
- * @param packed_path_end iterator pointing to the end of the NodeID list
- * @param callback void(const std::pair<NodeID, NodeID>, const EdgeData &) called for each
- *        original edge found.
- */
-template <typename BidirectionalIterator, typename Callback>
-void unpackPath(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
-                BidirectionalIterator packed_path_begin,
-                BidirectionalIterator packed_path_end,
-                Callback &&callback)
-{
-    // make sure we have at least something to unpack
-    if (packed_path_begin == packed_path_end)
-        return;
-
-    std::stack<std::pair<NodeID, NodeID>> recursion_stack;
-
-    // We have to push the path in reverse order onto the stack because it's LIFO.
-    for (auto current = std::prev(packed_path_end); current != packed_path_begin;
-         current = std::prev(current))
-    {
-        recursion_stack.emplace(*std::prev(current), *current);
-    }
-
-    std::pair<NodeID, NodeID> edge;
-    while (!recursion_stack.empty())
-    {
-        edge = recursion_stack.top();
-        recursion_stack.pop();
-
-        // Look for an edge on the forward CH graph (.forward)
-        EdgeID smaller_edge_id = facade.FindSmallestEdge(
-            edge.first, edge.second, [](const auto &data) { return data.forward; });
-
-        // If we didn't find one there, then we might be looking at a part of the path that
-        // was found using the backward search. Here, we flip the node order (.second, .first)
-        // and only consider edges with the `.backward` flag.
-        if (SPECIAL_EDGEID == smaller_edge_id)
-        {
-            smaller_edge_id = facade.FindSmallestEdge(
-                edge.second, edge.first, [](const auto &data) { return data.backward; });
-        }
-
-        // If we didn't find anything *still*, then something is broken and someone has
-        // called this function with bad values.
-        BOOST_ASSERT_MSG(smaller_edge_id != SPECIAL_EDGEID, "Invalid smaller edge ID");
-
-        const auto &data = facade.GetEdgeData(smaller_edge_id);
-        BOOST_ASSERT_MSG(data.weight != std::numeric_limits<EdgeWeight>::max(),
-                         "edge weight invalid");
-
-        // If the edge is a shortcut, we need to add the two halves to the stack.
-        if (data.shortcut)
-        { // unpack
-            const NodeID middle_node_id = data.id;
-            // Note the order here - we're adding these to a stack, so we
-            // want the first->middle to get visited before middle->second
-            recursion_stack.emplace(middle_node_id, edge.second);
-            recursion_stack.emplace(edge.first, middle_node_id);
-        }
-        else
-        {
-            // We found an original edge, call our callback.
-            std::forward<Callback>(callback)(edge, data);
-        }
-    }
-}
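The removed doc comment above describes the classic stack-based (depth-first) shortcut expansion. A self-contained toy version of that exact unpacking loop, using a plain std::map in place of the facade — all types here are simplified stand-ins:

    #include <cassert>
    #include <map>
    #include <stack>
    #include <utility>
    #include <vector>

    using NodeID = unsigned;
    struct EdgeData { bool shortcut; NodeID middle; };

    std::vector<std::pair<NodeID, NodeID>>
    unpack(const std::map<std::pair<NodeID, NodeID>, EdgeData> &edges, NodeID from, NodeID to)
    {
        std::vector<std::pair<NodeID, NodeID>> out;
        std::stack<std::pair<NodeID, NodeID>> stack;
        stack.push({from, to});
        while (!stack.empty())
        {
            const auto edge = stack.top();
            stack.pop();
            const EdgeData &data = edges.at(edge);
            if (data.shortcut)
            {
                // the first half must be visited first, so push it last (LIFO)
                stack.push({data.middle, edge.second});
                stack.push({edge.first, data.middle});
            }
            else
                out.push_back(edge); // original edge, emitted in path order
        }
        return out;
    }

    int main()
    {
        // 0 -> 2 is a shortcut over middle node 1; both halves are original edges.
        std::map<std::pair<NodeID, NodeID>, EdgeData> edges{
            {{0u, 2u}, {true, 1u}}, {{0u, 1u}, {false, 0u}}, {{1u, 2u}, {false, 0u}}};
        const auto path = unpack(edges, 0u, 2u);
        assert((path == std::vector<std::pair<NodeID, NodeID>>{{0u, 1u}, {1u, 2u}}));
    }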
-// Should work for both CH and non-CH, provided the unpackPath function above gets a proper
-// implementation.
-template <typename RandomIter, typename FacadeT>
-void unpackPath(const FacadeT &facade,
-                RandomIter packed_path_begin,
-                RandomIter packed_path_end,
-                const PhantomNodes &phantom_node_pair,
-                std::vector<PathData> &unpacked_path)
-{
-    BOOST_ASSERT(std::distance(packed_path_begin, packed_path_end) > 0);
-
-    const bool start_traversed_in_reverse =
-        (*packed_path_begin != phantom_node_pair.source_phantom.forward_segment_id.id);
-    const bool target_traversed_in_reverse =
-        (*std::prev(packed_path_end) != phantom_node_pair.target_phantom.forward_segment_id.id);
-
-    BOOST_ASSERT(*packed_path_begin == phantom_node_pair.source_phantom.forward_segment_id.id ||
-                 *packed_path_begin == phantom_node_pair.source_phantom.reverse_segment_id.id);
-    BOOST_ASSERT(
-        *std::prev(packed_path_end) == phantom_node_pair.target_phantom.forward_segment_id.id ||
-        *std::prev(packed_path_end) == phantom_node_pair.target_phantom.reverse_segment_id.id);
-
-    unpackPath(
-        facade,
-        packed_path_begin,
-        packed_path_end,
-        [&facade,
-         &unpacked_path,
-         &phantom_node_pair,
-         &start_traversed_in_reverse,
-         &target_traversed_in_reverse](std::pair<NodeID, NodeID> & /* edge */,
-                                       const auto &edge_data) {
-
-            BOOST_ASSERT_MSG(!edge_data.shortcut, "original edge flagged as shortcut");
-            const auto name_index = facade.GetNameIndexFromEdgeID(edge_data.id);
-            const auto turn_instruction = facade.GetTurnInstructionForEdgeID(edge_data.id);
-            const extractor::TravelMode travel_mode =
-                (unpacked_path.empty() && start_traversed_in_reverse)
-                    ? phantom_node_pair.source_phantom.backward_travel_mode
-                    : facade.GetTravelModeForEdgeID(edge_data.id);
-
-            const auto geometry_index = facade.GetGeometryIndexForEdgeID(edge_data.id);
-            std::vector<NodeID> id_vector;
-
-            std::vector<EdgeWeight> weight_vector;
-            std::vector<EdgeWeight> duration_vector;
-            std::vector<DatasourceID> datasource_vector;
-            if (geometry_index.forward)
-            {
-                id_vector = facade.GetUncompressedForwardGeometry(geometry_index.id);
-                weight_vector = facade.GetUncompressedForwardWeights(geometry_index.id);
-                duration_vector = facade.GetUncompressedForwardDurations(geometry_index.id);
-                datasource_vector = facade.GetUncompressedForwardDatasources(geometry_index.id);
-            }
-            else
-            {
-                id_vector = facade.GetUncompressedReverseGeometry(geometry_index.id);
-                weight_vector = facade.GetUncompressedReverseWeights(geometry_index.id);
-                duration_vector = facade.GetUncompressedReverseDurations(geometry_index.id);
-                datasource_vector = facade.GetUncompressedReverseDatasources(geometry_index.id);
-            }
-            BOOST_ASSERT(id_vector.size() > 0);
-            BOOST_ASSERT(datasource_vector.size() > 0);
-            BOOST_ASSERT(weight_vector.size() == id_vector.size() - 1);
-            BOOST_ASSERT(duration_vector.size() == id_vector.size() - 1);
-            const bool is_first_segment = unpacked_path.empty();
-
-            const std::size_t start_index =
-                (is_first_segment
-                     ? ((start_traversed_in_reverse)
-                            ? weight_vector.size() -
-                                  phantom_node_pair.source_phantom.fwd_segment_position - 1
-                            : phantom_node_pair.source_phantom.fwd_segment_position)
-                     : 0);
-            const std::size_t end_index = weight_vector.size();
-
-            BOOST_ASSERT(start_index >= 0);
-            BOOST_ASSERT(start_index < end_index);
-            for (std::size_t segment_idx = start_index; segment_idx < end_index; ++segment_idx)
-            {
-                unpacked_path.push_back(PathData{id_vector[segment_idx + 1],
-                                                 name_index,
-                                                 weight_vector[segment_idx],
-                                                 duration_vector[segment_idx],
-                                                 extractor::guidance::TurnInstruction::NO_TURN(),
-                                                 {{0, INVALID_LANEID}, INVALID_LANE_DESCRIPTIONID},
-                                                 travel_mode,
-                                                 INVALID_ENTRY_CLASSID,
-                                                 datasource_vector[segment_idx],
-                                                 util::guidance::TurnBearing(0),
-                                                 util::guidance::TurnBearing(0)});
-            }
-            BOOST_ASSERT(unpacked_path.size() > 0);
-            if (facade.hasLaneData(edge_data.id))
-                unpacked_path.back().lane_data = facade.GetLaneData(edge_data.id);
-
-            unpacked_path.back().entry_classid = facade.GetEntryClassID(edge_data.id);
-            unpacked_path.back().turn_instruction = turn_instruction;
-            unpacked_path.back().duration_until_turn +=
-                facade.GetDurationPenaltyForEdgeID(edge_data.id);
-            unpacked_path.back().weight_until_turn +=
-                facade.GetWeightPenaltyForEdgeID(edge_data.id);
-            unpacked_path.back().pre_turn_bearing = facade.PreTurnBearing(edge_data.id);
-            unpacked_path.back().post_turn_bearing = facade.PostTurnBearing(edge_data.id);
-        });
-
-    std::size_t start_index = 0, end_index = 0;
-    std::vector<unsigned> id_vector;
@ -537,140 +305,8 @@ void unpackPath(const FacadeT &facade,
|
||||
}
|
||||
}
|
||||
|
||||
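For orientation, a caller that already holds a packed leg from the bidirectional search might drive this overload roughly as follows. This is an illustrative sketch only; `facade`, `packed_leg`, and `nodes` are assumed to be set up by the surrounding plugin code and are not part of this change:

    // Sketch: expand a packed leg into annotated per-segment path data.
    std::vector<PathData> unpacked;
    unpackPath(facade,             // any facade satisfying FacadeT
               packed_leg.begin(), // packed NodeIDs from the search
               packed_leg.end(),
               nodes,              // PhantomNodes for source/target snapping
               unpacked);
    // `unpacked` now holds one PathData per original segment, with turn data
    // attached to the last segment of each unpacked edge.
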
/**
 * Unpacks a single edge (NodeID->NodeID) from the CH graph down to its original non-shortcut
 * route.
 * @param from the node the CH edge starts at
 * @param to the node the CH edge finishes at
 * @param unpacked_path the sequence of original NodeIDs that make up the expanded CH edge
 */
void unpackEdge(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
                const NodeID from,
                const NodeID to,
                std::vector<NodeID> &unpacked_path);

void retrievePackedPathFromHeap(const SearchEngineData::QueryHeap &forward_heap,
                                const SearchEngineData::QueryHeap &reverse_heap,
                                const NodeID middle_node_id,
                                std::vector<NodeID> &packed_path);

void retrievePackedPathFromSingleHeap(const SearchEngineData::QueryHeap &search_heap,
                                      const NodeID middle_node_id,
                                      std::vector<NodeID> &packed_path);

// Assumes that the heaps are already set up correctly.
// ATTENTION: This only works if no additional offset is supplied next to the Phantom Node
// Offsets.
// In case additional offsets are supplied, you might have to force a loop first.
// A forced loop might be necessary if source and target are on the same segment.
// If this is the case and the offsets of the respective direction are larger for the source
// than for the target, then a forced loop is required (e.g. source_phantom.forward_segment_id ==
// target_phantom.forward_segment_id
// && source_phantom.GetForwardWeightPlusOffset() > target_phantom.GetForwardWeightPlusOffset()),
// provided the heaps have been initialized with positive offsets.
void search(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
            SearchEngineData::QueryHeap &forward_heap,
            SearchEngineData::QueryHeap &reverse_heap,
            std::int32_t &weight,
            std::vector<NodeID> &packed_leg,
            const bool force_loop_forward,
            const bool force_loop_reverse,
            const int duration_upper_bound = INVALID_EDGE_WEIGHT);

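The forced-loop condition described in the comment above can be captured as a small predicate. A minimal sketch under the stated condition (illustrative only; the actual needsLoopForward is merely declared in this header and defined elsewhere):

    // Sketch: true when source and target snap to the same edge-based node and
    // the source lies "after" the target along it, so the search must leave the
    // node and come back around (a forced loop).
    inline bool sketchNeedsLoopForward(const PhantomNode &source, const PhantomNode &target)
    {
        return source.forward_segment_id.enabled && target.forward_segment_id.enabled &&
               source.forward_segment_id.id == target.forward_segment_id.id &&
               source.GetForwardWeightPlusOffset() > target.GetForwardWeightPlusOffset();
    }
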
// Alias to be compatible with the overload for CoreCH that needs 4 heaps
inline void search(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
                   SearchEngineData::QueryHeap &forward_heap,
                   SearchEngineData::QueryHeap &reverse_heap,
                   SearchEngineData::QueryHeap &,
                   SearchEngineData::QueryHeap &,
                   std::int32_t &weight,
                   std::vector<NodeID> &packed_leg,
                   const bool force_loop_forward,
                   const bool force_loop_reverse,
                   const int duration_upper_bound = INVALID_EDGE_WEIGHT)
{
    search(facade,
           forward_heap,
           reverse_heap,
           weight,
           packed_leg,
           force_loop_forward,
           force_loop_reverse,
           duration_upper_bound);
}

// Assumes that the heaps are already set up correctly.
// A forced loop might be necessary if source and target are on the same segment.
// If this is the case and the offsets of the respective direction are larger for the source
// than for the target, then a forced loop is required (e.g. source_phantom.forward_segment_id ==
// target_phantom.forward_segment_id
// && source_phantom.GetForwardWeightPlusOffset() > target_phantom.GetForwardWeightPlusOffset()),
// provided the heaps have been initialized with positive offsets.
void search(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CoreCH> &facade,
            SearchEngineData::QueryHeap &forward_heap,
            SearchEngineData::QueryHeap &reverse_heap,
            SearchEngineData::QueryHeap &forward_core_heap,
            SearchEngineData::QueryHeap &reverse_core_heap,
            int &weight,
            std::vector<NodeID> &packed_leg,
            const bool force_loop_forward,
            const bool force_loop_reverse,
            int duration_upper_bound = INVALID_EDGE_WEIGHT);

bool needsLoopForward(const PhantomNode &source_phantom, const PhantomNode &target_phantom);

bool needsLoopBackwards(const PhantomNode &source_phantom, const PhantomNode &target_phantom);

double getPathDistance(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
                       const std::vector<NodeID> &packed_path,
                       const PhantomNode &source_phantom,
                       const PhantomNode &target_phantom);

// Requires the heaps to be empty.
// If the heaps are to be initialized outside of this function,
// the addition of force_loop parameters might be required.
double
getNetworkDistance(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CoreCH> &facade,
                   SearchEngineData::QueryHeap &forward_heap,
                   SearchEngineData::QueryHeap &reverse_heap,
                   SearchEngineData::QueryHeap &forward_core_heap,
                   SearchEngineData::QueryHeap &reverse_core_heap,
                   const PhantomNode &source_phantom,
                   const PhantomNode &target_phantom,
                   int duration_upper_bound = INVALID_EDGE_WEIGHT);

// Requires the heaps to be empty.
// If the heaps are to be initialized outside of this function,
// the addition of force_loop parameters might be required.
double
getNetworkDistance(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
                   SearchEngineData::QueryHeap &forward_heap,
                   SearchEngineData::QueryHeap &reverse_heap,
                   const PhantomNode &source_phantom,
                   const PhantomNode &target_phantom,
                   int duration_upper_bound = INVALID_EDGE_WEIGHT);

// Alias to be compatible with the overload for CoreCH that needs 4 heaps
inline double
getNetworkDistance(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
                   SearchEngineData::QueryHeap &forward_heap,
                   SearchEngineData::QueryHeap &reverse_heap,
                   SearchEngineData::QueryHeap &,
                   SearchEngineData::QueryHeap &,
                   const PhantomNode &source_phantom,
                   const PhantomNode &target_phantom,
                   int duration_upper_bound = INVALID_EDGE_WEIGHT)
{
    return getNetworkDistance(
        facade, forward_heap, reverse_heap, source_phantom, target_phantom, duration_upper_bound);
}

} // namespace routing_algorithms
} // namespace engine
} // namespace osrm

#endif // ROUTING_BASE_HPP
#endif // OSRM_ENGINE_ROUTING_BASE_HPP

458
include/engine/routing_algorithms/routing_base_ch.hpp
Normal file
@ -0,0 +1,458 @@
#ifndef OSRM_ENGINE_ROUTING_BASE_CH_HPP
#define OSRM_ENGINE_ROUTING_BASE_CH_HPP

#include "engine/algorithm.hpp"
#include "engine/datafacade/contiguous_internalmem_datafacade.hpp"
#include "engine/routing_algorithms/routing_base.hpp"
#include "engine/search_engine_data.hpp"

#include "util/typedefs.hpp"

#include <boost/assert.hpp>

namespace osrm
{
namespace engine
{

namespace routing_algorithms
{

namespace ch
{

// Stalling
template <bool DIRECTION, typename HeapT>
bool stallAtNode(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
                 const NodeID node,
                 const EdgeWeight weight,
                 const HeapT &query_heap)
{
    for (auto edge : facade.GetAdjacentEdgeRange(node))
    {
        const auto &data = facade.GetEdgeData(edge);
        if (DIRECTION == REVERSE_DIRECTION ? data.forward : data.backward)
        {
            const NodeID to = facade.GetTarget(edge);
            const EdgeWeight edge_weight = data.weight;
            BOOST_ASSERT_MSG(edge_weight > 0, "edge_weight invalid");
            if (query_heap.WasInserted(to))
            {
                if (query_heap.GetKey(to) + edge_weight < weight)
                {
                    return true;
                }
            }
        }
    }
    return false;
}

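To make the stall test concrete, here is a standalone toy check of the condition it evaluates. The numbers are made up for illustration and are not taken from this change:

    #include <cassert>

    // Toy instance of the stall condition: dist(u) + w(u, v) < dist(v) => v is
    // stalled. Suppose the search pops v with tentative weight 10, and v has an
    // incoming edge (u, v) of weight 2 where u was already reached with weight 5.
    int main()
    {
        const int dist_u = 5, w_uv = 2, dist_v = 10;
        const bool stalled = dist_u + w_uv < dist_v;
        assert(stalled); // 7 < 10: v cannot lie on a shortest path, skip relaxing it
    }
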
template <bool DIRECTION>
void relaxOutgoingEdges(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
                        const NodeID node,
                        const EdgeWeight weight,
                        SearchEngineData::QueryHeap &heap)
{
    for (const auto edge : facade.GetAdjacentEdgeRange(node))
    {
        const auto &data = facade.GetEdgeData(edge);
        if (DIRECTION == FORWARD_DIRECTION ? data.forward : data.backward)
        {
            const NodeID to = facade.GetTarget(edge);
            const EdgeWeight edge_weight = data.weight;

            BOOST_ASSERT_MSG(edge_weight > 0, "edge_weight invalid");
            const EdgeWeight to_weight = weight + edge_weight;

            // New node discovered -> add to heap + node info storage
            if (!heap.WasInserted(to))
            {
                heap.Insert(to, to_weight, node);
            }
            // Found a shorter path -> update weight
            else if (to_weight < heap.GetKey(to))
            {
                // new parent
                heap.GetData(to).parent = node;
                heap.DecreaseKey(to, to_weight);
            }
        }
    }
}

/*
min_edge_offset is needed in case we use multiple
nodes as start/target nodes with different (even negative) offsets.
In that case the termination criterion is not correct
anymore.

Example:
forward heap: a(-100), b(0),
reverse heap: c(0), d(100)

   a --- d
    \   /
     \ /
     / \
    /   \
   b --- c

This is equivalent to running a bi-directional Dijkstra on the following graph:

     a --- d
    / \   / \
   y    x    z
    \ /   \ /
     b --- c

The graph is constructed by inserting nodes y and z that are connected to the initial nodes
using edges (y, a) with weight -100, (y, b) with weight 0 and,
correspondingly, (d, z) with weight 100, (c, z) with weight 0.
Since we are dealing with a graph that contains _negative_ edges,
we need to add an offset to the termination criterion.
*/
static constexpr bool ENABLE_STALLING = true;
static constexpr bool DISABLE_STALLING = false;
template <bool DIRECTION, bool STALLING = ENABLE_STALLING>
void routingStep(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
                 SearchEngineData::QueryHeap &forward_heap,
                 SearchEngineData::QueryHeap &reverse_heap,
                 NodeID &middle_node_id,
                 EdgeWeight &upper_bound,
                 EdgeWeight min_edge_offset,
                 const bool force_loop_forward,
                 const bool force_loop_reverse)
{
    const NodeID node = forward_heap.DeleteMin();
    const EdgeWeight weight = forward_heap.GetKey(node);

    if (reverse_heap.WasInserted(node))
    {
        const EdgeWeight new_weight = reverse_heap.GetKey(node) + weight;
        if (new_weight < upper_bound)
        {
            // if loops are forced, they are so at the source
            if ((force_loop_forward && forward_heap.GetData(node).parent == node) ||
                (force_loop_reverse && reverse_heap.GetData(node).parent == node) ||
                // in this case we are looking at a bi-directional way where the source
                // and target phantom are on the same edge based node
                new_weight < 0)
            {
                // check whether there is a loop present at the node
                for (const auto edge : facade.GetAdjacentEdgeRange(node))
                {
                    const auto &data = facade.GetEdgeData(edge);
                    if (DIRECTION == FORWARD_DIRECTION ? data.forward : data.backward)
                    {
                        const NodeID to = facade.GetTarget(edge);
                        if (to == node)
                        {
                            const EdgeWeight edge_weight = data.weight;
                            const EdgeWeight loop_weight = new_weight + edge_weight;
                            if (loop_weight >= 0 && loop_weight < upper_bound)
                            {
                                middle_node_id = node;
                                upper_bound = loop_weight;
                            }
                        }
                    }
                }
            }
            else
            {
                BOOST_ASSERT(new_weight >= 0);

                middle_node_id = node;
                upper_bound = new_weight;
            }
        }
    }

    // make sure we don't terminate too early if we initialize the weight
    // for the nodes in the forward heap with the forward/reverse offset
    BOOST_ASSERT(min_edge_offset <= 0);
    if (weight + min_edge_offset > upper_bound)
    {
        forward_heap.DeleteAll();
        return;
    }

    // Stalling
    if (STALLING && stallAtNode<DIRECTION>(facade, node, weight, forward_heap))
    {
        return;
    }

    relaxOutgoingEdges<DIRECTION>(facade, node, weight, forward_heap);
}

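A worked instance of the offset-corrected termination test above, matching the a(-100) example in the comment block. The values are illustrative only:

    #include <cassert>

    // With a source offset of -100 inserted into the forward heap,
    // min_edge_offset must be -100: otherwise a node popped with key 50 and an
    // upper bound of 40 would stop the search, even though a relaxation from it
    // could still yield a path of weight 50 - 100 = -50 < 40.
    int main()
    {
        const int weight = 50, min_edge_offset = -100, upper_bound = 40;
        const bool may_terminate = weight + min_edge_offset > upper_bound;
        assert(!may_terminate); // -50 <= 40: keep searching
    }
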
template <bool UseDuration>
EdgeWeight
getLoopWeight(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
              NodeID node)
{
    EdgeWeight loop_weight = UseDuration ? MAXIMAL_EDGE_DURATION : INVALID_EDGE_WEIGHT;
    for (auto edge : facade.GetAdjacentEdgeRange(node))
    {
        const auto &data = facade.GetEdgeData(edge);
        if (data.forward)
        {
            const NodeID to = facade.GetTarget(edge);
            if (to == node)
            {
                const auto value = UseDuration ? data.duration : data.weight;
                loop_weight = std::min(loop_weight, value);
            }
        }
    }
    return loop_weight;
}

/**
 * Given a sequence of connected `NodeID`s in the CH graph, performs a depth-first unpacking of
 * the shortcut edges. For every "original" edge found, it calls the `callback` with the two
 * NodeIDs for the edge and the EdgeID of that edge.
 *
 * The primary purpose of this unpacking is to expand a path through the CH into the original
 * route through the pre-contracted graph.
 *
 * Because of the depth-first search, the `callback` will effectively be called in sequence for
 * the original route from beginning to end.
 *
 * @param packed_path_begin iterator pointing to the start of the NodeID list
 * @param packed_path_end iterator pointing to the end of the NodeID list
 * @param callback void(const std::pair<NodeID, NodeID>, const EdgeID &) called for each
 *                 original edge found.
 */
template <typename BidirectionalIterator, typename Callback>
void unpackPath(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
                BidirectionalIterator packed_path_begin,
                BidirectionalIterator packed_path_end,
                Callback &&callback)
{
    // make sure we have at least something to unpack
    if (packed_path_begin == packed_path_end)
        return;

    std::stack<std::pair<NodeID, NodeID>> recursion_stack;

    // We have to push the path in reverse order onto the stack because it's LIFO.
    for (auto current = std::prev(packed_path_end); current != packed_path_begin;
         current = std::prev(current))
    {
        recursion_stack.emplace(*std::prev(current), *current);
    }

    std::pair<NodeID, NodeID> edge;
    while (!recursion_stack.empty())
    {
        edge = recursion_stack.top();
        recursion_stack.pop();

        // Look for an edge on the forward CH graph (.forward)
        EdgeID smaller_edge_id = facade.FindSmallestEdge(
            edge.first, edge.second, [](const auto &data) { return data.forward; });

        // If we didn't find one there, then we might be looking at a part of the path that
        // was found using the backward search. Here, we flip the node order (.second, .first)
        // and only consider edges with the `.backward` flag.
        if (SPECIAL_EDGEID == smaller_edge_id)
        {
            smaller_edge_id = facade.FindSmallestEdge(
                edge.second, edge.first, [](const auto &data) { return data.backward; });
        }

        // If we *still* didn't find anything, then something is broken and someone has
        // called this function with bad values.
        BOOST_ASSERT_MSG(smaller_edge_id != SPECIAL_EDGEID, "Invalid smaller edge ID");

        const auto &data = facade.GetEdgeData(smaller_edge_id);
        BOOST_ASSERT_MSG(data.weight != std::numeric_limits<EdgeWeight>::max(),
                         "edge weight invalid");

        // If the edge is a shortcut, we need to add the two halves to the stack.
        if (data.shortcut)
        { // unpack
            const NodeID middle_node_id = data.turn_id;
            // Note the order here - we're adding these to a stack, so we
            // want the first->middle to get visited before middle->second
            recursion_stack.emplace(middle_node_id, edge.second);
            recursion_stack.emplace(edge.first, middle_node_id);
        }
        else
        {
            // We found an original edge, call our callback.
            std::forward<Callback>(callback)(edge, smaller_edge_id);
        }
    }
}

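As a usage illustration, a caller can collect the unpacked original edge IDs with a small lambda; `facade` and `packed_leg` are assumed to exist in the caller. This mirrors the wrapper that follows:

    // Sketch: gather original (non-shortcut) EdgeIDs along a packed CH leg.
    std::vector<EdgeID> original_edges;
    ch::unpackPath(facade,
                   packed_leg.begin(),
                   packed_leg.end(),
                   [&original_edges](std::pair<NodeID, NodeID> & /* edge */,
                                     const EdgeID &edge_id) { original_edges.push_back(edge_id); });
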
template <typename RandomIter, typename FacadeT>
void unpackPath(const FacadeT &facade,
                RandomIter packed_path_begin,
                RandomIter packed_path_end,
                const PhantomNodes &phantom_nodes,
                std::vector<PathData> &unpacked_path)
{
    const auto nodes_number = std::distance(packed_path_begin, packed_path_end);
    BOOST_ASSERT(nodes_number > 0);

    std::vector<EdgeID> unpacked_edges;

    auto source_node = *packed_path_begin, target_node = *packed_path_begin;
    if (nodes_number > 1)
    {
        target_node = *std::prev(packed_path_end);
        unpacked_edges.reserve(std::distance(packed_path_begin, packed_path_end));
        unpackPath(
            facade,
            packed_path_begin,
            packed_path_end,
            [&facade, &unpacked_edges](std::pair<NodeID, NodeID> & /* edge */,
                                       const auto &edge_id) { unpacked_edges.push_back(edge_id); });
    }

    annotatePath(facade, source_node, target_node, unpacked_edges, phantom_nodes, unpacked_path);
}

/**
 * Unpacks a single edge (NodeID->NodeID) from the CH graph down to its original non-shortcut
 * route.
 * @param from the node the CH edge starts at
 * @param to the node the CH edge finishes at
 * @param unpacked_path the sequence of original NodeIDs that make up the expanded CH edge
 */
void unpackEdge(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
                const NodeID from,
                const NodeID to,
                std::vector<NodeID> &unpacked_path);

void retrievePackedPathFromHeap(const SearchEngineData::QueryHeap &forward_heap,
                                const SearchEngineData::QueryHeap &reverse_heap,
                                const NodeID middle_node_id,
                                std::vector<NodeID> &packed_path);

void retrievePackedPathFromSingleHeap(const SearchEngineData::QueryHeap &search_heap,
                                      const NodeID middle_node_id,
                                      std::vector<NodeID> &packed_path);

// Assumes that the heaps are already set up correctly.
// ATTENTION: This only works if no additional offset is supplied next to the Phantom Node
// Offsets.
// In case additional offsets are supplied, you might have to force a loop first.
// A forced loop might be necessary if source and target are on the same segment.
// If this is the case and the offsets of the respective direction are larger for the source
// than for the target, then a forced loop is required (e.g. source_phantom.forward_segment_id ==
// target_phantom.forward_segment_id
// && source_phantom.GetForwardWeightPlusOffset() > target_phantom.GetForwardWeightPlusOffset()),
// provided the heaps have been initialized with positive offsets.
void search(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
            SearchEngineData::QueryHeap &forward_heap,
            SearchEngineData::QueryHeap &reverse_heap,
            std::int32_t &weight,
            std::vector<NodeID> &packed_leg,
            const bool force_loop_forward,
            const bool force_loop_reverse,
            const int duration_upper_bound = INVALID_EDGE_WEIGHT);

// Alias to be compatible with the overload for CoreCH that needs 4 heaps
inline void search(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
                   SearchEngineData::QueryHeap &forward_heap,
                   SearchEngineData::QueryHeap &reverse_heap,
                   SearchEngineData::QueryHeap &,
                   SearchEngineData::QueryHeap &,
                   std::int32_t &weight,
                   std::vector<NodeID> &packed_leg,
                   const bool force_loop_forward,
                   const bool force_loop_reverse,
                   const int duration_upper_bound = INVALID_EDGE_WEIGHT)
{
    search(facade,
           forward_heap,
           reverse_heap,
           weight,
           packed_leg,
           force_loop_forward,
           force_loop_reverse,
           duration_upper_bound);
}

// Assumes that the heaps are already set up correctly.
// A forced loop might be necessary if source and target are on the same segment.
// If this is the case and the offsets of the respective direction are larger for the source
// than for the target, then a forced loop is required (e.g. source_phantom.forward_segment_id ==
// target_phantom.forward_segment_id
// && source_phantom.GetForwardWeightPlusOffset() > target_phantom.GetForwardWeightPlusOffset()),
// provided the heaps have been initialized with positive offsets.
void search(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CoreCH> &facade,
            SearchEngineData::QueryHeap &forward_heap,
            SearchEngineData::QueryHeap &reverse_heap,
            SearchEngineData::QueryHeap &forward_core_heap,
            SearchEngineData::QueryHeap &reverse_core_heap,
            int &weight,
            std::vector<NodeID> &packed_leg,
            const bool force_loop_forward,
            const bool force_loop_reverse,
            int duration_upper_bound = INVALID_EDGE_WEIGHT);

bool needsLoopForward(const PhantomNode &source_phantom, const PhantomNode &target_phantom);

bool needsLoopBackwards(const PhantomNode &source_phantom, const PhantomNode &target_phantom);

double getPathDistance(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
                       const std::vector<NodeID> &packed_path,
                       const PhantomNode &source_phantom,
                       const PhantomNode &target_phantom);

// Requires the heaps to be empty.
// If the heaps are to be initialized outside of this function,
// the addition of force_loop parameters might be required.
double
getNetworkDistance(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CoreCH> &facade,
                   SearchEngineData::QueryHeap &forward_heap,
                   SearchEngineData::QueryHeap &reverse_heap,
                   SearchEngineData::QueryHeap &forward_core_heap,
                   SearchEngineData::QueryHeap &reverse_core_heap,
                   const PhantomNode &source_phantom,
                   const PhantomNode &target_phantom,
                   int duration_upper_bound = INVALID_EDGE_WEIGHT);

// Requires the heaps to be empty.
// If the heaps are to be initialized outside of this function,
// the addition of force_loop parameters might be required.
double
getNetworkDistance(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
                   SearchEngineData::QueryHeap &forward_heap,
                   SearchEngineData::QueryHeap &reverse_heap,
                   const PhantomNode &source_phantom,
                   const PhantomNode &target_phantom,
                   int duration_upper_bound = INVALID_EDGE_WEIGHT);

// Alias to be compatible with the overload for CoreCH that needs 4 heaps
inline double
getNetworkDistance(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
                   SearchEngineData::QueryHeap &forward_heap,
                   SearchEngineData::QueryHeap &reverse_heap,
                   SearchEngineData::QueryHeap &,
                   SearchEngineData::QueryHeap &,
                   const PhantomNode &source_phantom,
                   const PhantomNode &target_phantom,
                   int duration_upper_bound = INVALID_EDGE_WEIGHT)
{
    return getNetworkDistance(
        facade, forward_heap, reverse_heap, source_phantom, target_phantom, duration_upper_bound);
}

} // namespace ch
} // namespace routing_algorithms
} // namespace engine
} // namespace osrm

#endif // OSRM_ENGINE_ROUTING_BASE_CH_HPP
288
include/engine/routing_algorithms/routing_base_mld.hpp
Normal file
@ -0,0 +1,288 @@
#ifndef OSRM_ENGINE_ROUTING_BASE_MLD_HPP
#define OSRM_ENGINE_ROUTING_BASE_MLD_HPP

#include "engine/algorithm.hpp"
#include "engine/datafacade/contiguous_internalmem_datafacade.hpp"
#include "engine/routing_algorithms/routing_base.hpp"
#include "engine/search_engine_data.hpp"

#include "util/typedefs.hpp"

#include <boost/assert.hpp>

namespace osrm
{
namespace engine
{
namespace routing_algorithms
{
namespace mld
{

namespace
{
// Unrestricted search (Args is const PhantomNodes &):
//   * use partition.GetQueryLevel to find the node query level based on source and target phantoms
//   * allow traversing all cells
LevelID getNodeQueryLevel(const partition::MultiLevelPartitionView &partition,
                          NodeID node,
                          const PhantomNodes &phantom_nodes)
{
    auto level = [&partition, node](const SegmentID &source, const SegmentID &target) {
        if (source.enabled && target.enabled)
            return partition.GetQueryLevel(source.id, target.id, node);
        return INVALID_LEVEL_ID;
    };
    return std::min(std::min(level(phantom_nodes.source_phantom.forward_segment_id,
                                   phantom_nodes.target_phantom.forward_segment_id),
                             level(phantom_nodes.source_phantom.forward_segment_id,
                                   phantom_nodes.target_phantom.reverse_segment_id)),
                    std::min(level(phantom_nodes.source_phantom.reverse_segment_id,
                                   phantom_nodes.target_phantom.forward_segment_id),
                             level(phantom_nodes.source_phantom.reverse_segment_id,
                                   phantom_nodes.target_phantom.reverse_segment_id)));
}

bool checkParentCellRestriction(CellID, const PhantomNodes &) { return true; }

// Restricted search (Args is LevelID, CellID):
//   * use the fixed level for queries
//   * check if the node cell is the same as the specified parent one
LevelID getNodeQueryLevel(const partition::MultiLevelPartitionView &, NodeID, LevelID level, CellID)
{
    return level;
}

bool checkParentCellRestriction(CellID cell, LevelID, CellID parent) { return cell == parent; }
}

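The two overload sets above are selected purely by the trailing argument pack: a top-level query passes the phantom nodes, while the recursive unpacking below passes a fixed level and parent cell. A self-contained toy model of this dispatch pattern (names and types here are invented for illustration, not part of this header):

    #include <iostream>

    // Toy model of the Args... dispatch used above: the same stepping code
    // calls whichever overload matches the trailing argument pack.
    struct Phantoms {};
    int queryLevel(Phantoms) { return 42; }                // stands in for the unrestricted overload
    int queryLevel(int level, unsigned) { return level; }  // stands in for the restricted one

    template <typename... Args> void step(Args... args)
    {
        std::cout << "level " << queryLevel(args...) << '\n';
    }

    int main()
    {
        step(Phantoms{}); // level 42
        step(3, 7u);      // level 3
    }
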
template <bool DIRECTION, typename... Args>
void routingStep(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::MLD> &facade,
                 SearchEngineData::MultiLayerDijkstraHeap &forward_heap,
                 SearchEngineData::MultiLayerDijkstraHeap &reverse_heap,
                 NodeID &middle_node,
                 EdgeWeight &path_upper_bound,
                 Args... args)
{
    const auto &partition = facade.GetMultiLevelPartition();
    const auto &cells = facade.GetCellStorage();

    const auto node = forward_heap.DeleteMin();
    const auto weight = forward_heap.GetKey(node);

    // Upper bound for the path source -> target with
    // weight(source -> node) = weight, weight(node -> target) ≤ reverse_weight
    // is weight + reverse_weight.
    // A tighter upper bound requires the additional condition reverse_heap.WasRemoved(node)
    // with weight(node -> target) = reverse_weight and all weights ≥ 0.
    if (reverse_heap.WasInserted(node))
    {
        auto reverse_weight = reverse_heap.GetKey(node);
        auto path_weight = weight + reverse_weight;
        if (path_weight >= 0 && path_weight < path_upper_bound)
        {
            middle_node = node;
            path_upper_bound = path_weight;
        }
    }

    const auto level = getNodeQueryLevel(partition, node, args...);

    if (level >= 1 && !forward_heap.GetData(node).from_clique_arc)
    {
        if (DIRECTION == FORWARD_DIRECTION)
        {
            // Shortcuts in forward direction
            const auto &cell = cells.GetCell(level, partition.GetCell(level, node));
            auto destination = cell.GetDestinationNodes().begin();
            for (auto shortcut_weight : cell.GetOutWeight(node))
            {
                BOOST_ASSERT(destination != cell.GetDestinationNodes().end());
                const NodeID to = *destination;
                if (shortcut_weight != INVALID_EDGE_WEIGHT && node != to)
                {
                    const EdgeWeight to_weight = weight + shortcut_weight;
                    if (!forward_heap.WasInserted(to))
                    {
                        forward_heap.Insert(to, to_weight, {node, true});
                    }
                    else if (to_weight < forward_heap.GetKey(to))
                    {
                        forward_heap.GetData(to) = {node, true};
                        forward_heap.DecreaseKey(to, to_weight);
                    }
                }
                ++destination;
            }
        }
        else
        {
            // Shortcuts in backward direction
            const auto &cell = cells.GetCell(level, partition.GetCell(level, node));
            auto source = cell.GetSourceNodes().begin();
            for (auto shortcut_weight : cell.GetInWeight(node))
            {
                BOOST_ASSERT(source != cell.GetSourceNodes().end());
                const NodeID to = *source;
                if (shortcut_weight != INVALID_EDGE_WEIGHT && node != to)
                {
                    const EdgeWeight to_weight = weight + shortcut_weight;
                    if (!forward_heap.WasInserted(to))
                    {
                        forward_heap.Insert(to, to_weight, {node, true});
                    }
                    else if (to_weight < forward_heap.GetKey(to))
                    {
                        forward_heap.GetData(to) = {node, true};
                        forward_heap.DecreaseKey(to, to_weight);
                    }
                }
                ++source;
            }
        }
    }

    // Boundary edges
    for (const auto edge : facade.GetAdjacentEdgeRange(node))
    {
        const auto &edge_data = facade.GetEdgeData(edge);
        if (DIRECTION == FORWARD_DIRECTION ? edge_data.forward : edge_data.backward)
        {
            const NodeID to = facade.GetTarget(edge);

            if (checkParentCellRestriction(partition.GetCell(level + 1, to), args...) &&
                partition.GetHighestDifferentLevel(node, to) >= level)
            {
                BOOST_ASSERT_MSG(edge_data.weight > 0, "edge_weight invalid");
                const EdgeWeight to_weight = weight + edge_data.weight;

                if (!forward_heap.WasInserted(to))
                {
                    forward_heap.Insert(to, to_weight, {node, false});
                }
                else if (to_weight < forward_heap.GetKey(to))
                {
                    forward_heap.GetData(to) = {node, false};
                    forward_heap.DecreaseKey(to, to_weight);
                }
            }
        }
    }
}

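The {parent, bool} heap data written above records whether a node was reached over a cell shortcut (a "clique arc", true) or a boundary edge (false); only shortcut hops need recursive expansion later. A standalone toy illustration of that bookkeeping (invented names and values):

    #include <cassert>
    #include <vector>

    // Toy packed path: each hop stores whether it came from an overlay shortcut.
    struct Hop { unsigned from, to; bool from_clique_arc; };

    int main()
    {
        std::vector<Hop> packed = {{0, 4, false}, {4, 9, true}, {9, 11, false}};
        // Only the overlay hop (4 -> 9) needs a recursive sub-search to expand it.
        int needs_unpacking = 0;
        for (const auto &hop : packed)
            needs_unpacking += hop.from_clique_arc ? 1 : 0;
        assert(needs_unpacking == 1);
    }
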
template <typename... Args>
std::tuple<EdgeWeight, NodeID, NodeID, std::vector<EdgeID>>
search(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::MLD> &facade,
       SearchEngineData::MultiLayerDijkstraHeap &forward_heap,
       SearchEngineData::MultiLayerDijkstraHeap &reverse_heap,
       Args... args)
{

    const auto &partition = facade.GetMultiLevelPartition();

    BOOST_ASSERT(!forward_heap.Empty() && forward_heap.MinKey() < INVALID_EDGE_WEIGHT);
    BOOST_ASSERT(!reverse_heap.Empty() && reverse_heap.MinKey() < INVALID_EDGE_WEIGHT);

    // run a two-target Dijkstra routing step
    NodeID middle = SPECIAL_NODEID;
    EdgeWeight weight = INVALID_EDGE_WEIGHT;
    EdgeWeight forward_heap_min = forward_heap.MinKey();
    EdgeWeight reverse_heap_min = reverse_heap.MinKey();
    while (forward_heap.Size() + reverse_heap.Size() > 0 &&
           forward_heap_min + reverse_heap_min < weight)
    {
        if (!forward_heap.Empty())
        {
            routingStep<FORWARD_DIRECTION>(
                facade, forward_heap, reverse_heap, middle, weight, args...);
            if (!forward_heap.Empty())
                forward_heap_min = forward_heap.MinKey();
        }
        if (!reverse_heap.Empty())
        {
            routingStep<REVERSE_DIRECTION>(
                facade, reverse_heap, forward_heap, middle, weight, args...);
            if (!reverse_heap.Empty())
                reverse_heap_min = reverse_heap.MinKey();
        }
    }

    // No path found for both target nodes?
    if (weight == INVALID_EDGE_WEIGHT || SPECIAL_NODEID == middle)
    {
        return std::make_tuple(
            INVALID_EDGE_WEIGHT, SPECIAL_NODEID, SPECIAL_NODEID, std::vector<EdgeID>());
    }

    // Get the packed path as edges {from node ID, to node ID, from_clique_arc flag}
    std::vector<std::tuple<NodeID, NodeID, bool>> packed_path;
    NodeID current_node = middle, parent_node = forward_heap.GetData(middle).parent;
    while (parent_node != current_node)
    {
        const auto &data = forward_heap.GetData(current_node);
        packed_path.push_back(std::make_tuple(parent_node, current_node, data.from_clique_arc));
        current_node = parent_node;
        parent_node = forward_heap.GetData(parent_node).parent;
    }
    std::reverse(std::begin(packed_path), std::end(packed_path));
    const NodeID source_node = current_node;

    current_node = middle, parent_node = reverse_heap.GetData(middle).parent;
    while (parent_node != current_node)
    {
        const auto &data = reverse_heap.GetData(current_node);
        packed_path.push_back(std::make_tuple(current_node, parent_node, data.from_clique_arc));
        current_node = parent_node;
        parent_node = reverse_heap.GetData(parent_node).parent;
    }
    const NodeID target_node = current_node;

    // Unpack the path
    std::vector<EdgeID> unpacked_path;
    unpacked_path.reserve(packed_path.size());
    for (auto const &packed_edge : packed_path)
    {
        NodeID source, target;
        bool overlay_edge;
        std::tie(source, target, overlay_edge) = packed_edge;
        if (!overlay_edge)
        { // a base graph edge
            unpacked_path.push_back(facade.FindEdge(source, target));
        }
        else
        { // an overlay graph edge
            LevelID level = getNodeQueryLevel(partition, source, args...);
            CellID parent_cell_id = partition.GetCell(level, source);
            BOOST_ASSERT(parent_cell_id == partition.GetCell(level, target));

            LevelID sublevel = level - 1;

            // Here the heaps can be reused, let's go deeper!
            forward_heap.Clear();
            reverse_heap.Clear();
            forward_heap.Insert(source, 0, {source});
            reverse_heap.Insert(target, 0, {target});

            // TODO: when structured bindings are allowed, change to
            // auto [subpath_weight, subpath_source, subpath_target, subpath] = ...
            EdgeWeight subpath_weight;
            NodeID subpath_source, subpath_target;
            std::vector<EdgeID> subpath;
            std::tie(subpath_weight, subpath_source, subpath_target, subpath) =
                search(facade, forward_heap, reverse_heap, sublevel, parent_cell_id);
            BOOST_ASSERT(!subpath.empty());
            BOOST_ASSERT(subpath_source == source);
            BOOST_ASSERT(subpath_target == target);
            unpacked_path.insert(unpacked_path.end(), subpath.begin(), subpath.end());
        }
    }

    return std::make_tuple(weight, source_node, target_node, std::move(unpacked_path));
}

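Taken together, a direct-path query over MLD boils down to seeding both heaps with the phantom endpoints and reading the result tuple back. A hedged caller sketch; `facade`, `forward_heap`, `reverse_heap`, and `nodes` are assumed to be set up as in the plugin code elsewhere in this change, and insertNodesInHeaps is the shared helper this change introduces:

    // Sketch of a top-level (unrestricted) MLD query.
    insertNodesInHeaps(forward_heap, reverse_heap, nodes);

    EdgeWeight weight;
    NodeID source_node, target_node;
    std::vector<EdgeID> unpacked_edges;
    std::tie(weight, source_node, target_node, unpacked_edges) =
        mld::search(facade, forward_heap, reverse_heap, nodes);
    // `unpacked_edges` now holds only base-graph edges: every overlay edge has
    // been expanded by the recursive sub-searches above.
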
} // namespace mld
} // namespace routing_algorithms
} // namespace engine
} // namespace osrm

#endif // OSRM_ENGINE_ROUTING_BASE_MLD_HPP
@ -3,6 +3,7 @@

#include <boost/thread/tss.hpp>

#include "partition/multi_level_partition.hpp"
#include "util/binary_heap.hpp"
#include "util/typedefs.hpp"

@ -23,6 +24,13 @@ struct ManyToManyHeapData : HeapData
    ManyToManyHeapData(NodeID p, EdgeWeight duration) : HeapData(p), duration(duration) {}
};

struct MultiLayerDijkstraHeapData : HeapData
{
    bool from_clique_arc;
    MultiLayerDijkstraHeapData(NodeID p) : HeapData(p), from_clique_arc(false) {}
    MultiLayerDijkstraHeapData(NodeID p, bool from) : HeapData(p), from_clique_arc(from) {}
};

struct SearchEngineData
{
    using QueryHeap = util::
@ -37,6 +45,14 @@ struct SearchEngineData

    using ManyToManyHeapPtr = boost::thread_specific_ptr<ManyToManyQueryHeap>;

    using MultiLayerDijkstraHeap = util::BinaryHeap<NodeID,
                                                    NodeID,
                                                    EdgeWeight,
                                                    MultiLayerDijkstraHeapData,
                                                    util::UnorderedMapStorage<NodeID, int>>;

    using MultiLayerDijkstraHeapPtr = boost::thread_specific_ptr<MultiLayerDijkstraHeap>;

    static SearchEngineHeapPtr forward_heap_1;
    static SearchEngineHeapPtr reverse_heap_1;
    static SearchEngineHeapPtr forward_heap_2;
@ -44,6 +60,8 @@ struct SearchEngineData
    static SearchEngineHeapPtr forward_heap_3;
    static SearchEngineHeapPtr reverse_heap_3;
    static ManyToManyHeapPtr many_to_many_heap;
    static MultiLayerDijkstraHeapPtr mld_forward_heap;
    static MultiLayerDijkstraHeapPtr mld_reverse_heap;

    void InitializeOrClearFirstThreadLocalStorage(const unsigned number_of_nodes);

@ -52,6 +70,8 @@ struct SearchEngineData
    void InitializeOrClearThirdThreadLocalStorage(const unsigned number_of_nodes);

    void InitializeOrClearManyToManyThreadLocalStorage(const unsigned number_of_nodes);

    void InitializeOrClearMultiLayerDijkstraThreadLocalStorage(const unsigned number_of_nodes);
};
}
}

@ -32,19 +32,19 @@ struct EdgeBasedEdge

struct EdgeData
{
    EdgeData() : edge_id(0), weight(0), duration(0), forward(false), backward(false) {}
    EdgeData() : turn_id(0), weight(0), duration(0), forward(false), backward(false) {}

    EdgeData(const NodeID edge_id,
    EdgeData(const NodeID turn_id,
             const EdgeWeight weight,
             const EdgeWeight duration,
             const bool forward,
             const bool backward)
        : edge_id(edge_id), weight(weight), duration(duration), forward(forward),
        : turn_id(turn_id), weight(weight), duration(duration), forward(forward),
          backward(backward)
    {
    }

    NodeID edge_id;
    NodeID turn_id; // ID of the edge based node (node based edge)
    EdgeWeight weight;
    EdgeWeight duration : 30;
    std::uint32_t forward : 1;
@ -64,12 +64,12 @@ inline EdgeBasedEdge::EdgeBasedEdge() : source(0), target(0) {}

inline EdgeBasedEdge::EdgeBasedEdge(const NodeID source,
                                    const NodeID target,
                                    const NodeID edge_id,
                                    const NodeID turn_id,
                                    const EdgeWeight weight,
                                    const EdgeWeight duration,
                                    const bool forward,
                                    const bool backward)
    : source(source), target(target), data{edge_id, weight, duration, forward, backward}
    : source(source), target(target), data{turn_id, weight, duration, forward, backward}
{
}

@ -172,8 +172,8 @@ template <bool UseShareMemory> class CellStorageImpl
          destination_boundary{all_destinations + data.destination_boundary_offset}
    {
        BOOST_ASSERT(all_weight != nullptr);
        BOOST_ASSERT(all_sources != nullptr);
        BOOST_ASSERT(all_destinations != nullptr);
        BOOST_ASSERT(num_source_nodes == 0 || all_sources != nullptr);
        BOOST_ASSERT(num_destination_nodes == 0 || all_destinations != nullptr);
    }
};

@ -333,8 +333,10 @@ template <bool UseShareMemory> class CellStorageImpl
    const auto offset = level_to_cell_offset[level_index];
    const auto cell_index = offset + id;
    BOOST_ASSERT(cell_index < cells.size());
    return ConstCell{
        cells[cell_index], weights.data(), source_boundary.data(), destination_boundary.data()};
    return ConstCell{cells[cell_index],
                     weights.data(),
                     source_boundary.empty() ? nullptr : source_boundary.data(),
                     destination_boundary.empty() ? nullptr : destination_boundary.data()};
}

template <typename = std::enable_if<!UseShareMemory>> Cell GetCell(LevelID level, CellID id)

@ -35,7 +35,7 @@ splitBidirectionalEdges(const std::vector<extractor::EdgeBasedEdge> &edges)

        directed.emplace_back(edge.source,
                              edge.target,
                              edge.data.edge_id,
                              edge.data.turn_id,
                              std::max(edge.data.weight, 1),
                              edge.data.duration,
                              edge.data.forward,
@ -43,7 +43,7 @@ splitBidirectionalEdges(const std::vector<extractor::EdgeBasedEdge> &edges)

        directed.emplace_back(edge.target,
                              edge.source,
                              edge.data.edge_id,
                              edge.data.turn_id,
                              std::max(edge.data.weight, 1),
                              edge.data.duration,
                              edge.data.backward,
@ -77,7 +77,7 @@ std::vector<OutputEdgeT> prepareEdgesForUsageInGraph(std::vector<extractor::Edge
        OutputEdgeT reverse_edge;
        forward_edge.source = reverse_edge.source = source;
        forward_edge.target = reverse_edge.target = target;
        forward_edge.data.edge_id = reverse_edge.data.edge_id = edges[i].data.edge_id;
        forward_edge.data.turn_id = reverse_edge.data.turn_id = edges[i].data.turn_id;
        forward_edge.data.weight = reverse_edge.data.weight = INVALID_EDGE_WEIGHT;
        forward_edge.data.duration = reverse_edge.data.duration = MAXIMAL_EDGE_DURATION_INT_30;
        forward_edge.data.forward = reverse_edge.data.backward = true;

@ -25,7 +25,7 @@
    },
    "scripts": {
        "lint": "eslint -c ./.eslintrc features/step_definitions/ features/support/",
        "test": "npm run lint && node ./node_modules/cucumber/bin/cucumber.js features/ -p verify",
        "test": "npm run lint && node ./node_modules/cucumber/bin/cucumber.js features/ -p verify && node ./node_modules/cucumber/bin/cucumber.js features/ -p mld",
        "clean-test": "rm -rf test/cache",
        "cucumber": "./node_modules/cucumber/bin/cucumber.js",
        "build-api-docs": "./scripts/build_api_docs.sh"

@ -1,5 +1,5 @@
|
||||
#include "engine/routing_algorithms/alternative_path.hpp"
|
||||
#include "engine/routing_algorithms/routing_base.hpp"
|
||||
#include "engine/routing_algorithms/routing_base_ch.hpp"
|
||||
|
||||
#include "util/integer_range.hpp"
|
||||
|
||||
@ -89,7 +89,7 @@ void alternativeRoutingStep(
|
||||
else
|
||||
{
|
||||
// check whether there is a loop present at the node
|
||||
const auto loop_weight = getLoopWeight<false>(facade, node);
|
||||
const auto loop_weight = ch::getLoopWeight<false>(facade, node);
|
||||
const EdgeWeight new_weight_with_loop = new_weight + loop_weight;
|
||||
if (loop_weight != INVALID_EDGE_WEIGHT &&
|
||||
new_weight_with_loop <= *upper_bound_to_shortest_path_weight)
|
||||
@ -139,11 +139,11 @@ void retrievePackedAlternatePath(const QueryHeap &forward_heap1,
|
||||
{
|
||||
// fetch packed path [s,v)
|
||||
std::vector<NodeID> packed_v_t_path;
|
||||
retrievePackedPathFromHeap(forward_heap1, reverse_heap2, s_v_middle, packed_path);
|
||||
ch::retrievePackedPathFromHeap(forward_heap1, reverse_heap2, s_v_middle, packed_path);
|
||||
packed_path.pop_back(); // remove middle node. It's in both half-paths
|
||||
|
||||
// fetch patched path [v,t]
|
||||
retrievePackedPathFromHeap(forward_heap2, reverse_heap1, v_t_middle, packed_v_t_path);
|
||||
ch::retrievePackedPathFromHeap(forward_heap2, reverse_heap1, v_t_middle, packed_v_t_path);
|
||||
|
||||
packed_path.insert(packed_path.end(), packed_v_t_path.begin(), packed_v_t_path.end());
|
||||
}
|
||||
@ -180,14 +180,14 @@ void computeLengthAndSharingOfViaPath(
|
||||
// compute path <s,..,v> by reusing forward search from s
|
||||
while (!new_reverse_heap.Empty())
|
||||
{
|
||||
routingStep<REVERSE_DIRECTION>(facade,
|
||||
new_reverse_heap,
|
||||
existing_forward_heap,
|
||||
s_v_middle,
|
||||
upper_bound_s_v_path_length,
|
||||
min_edge_offset,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS);
|
||||
ch::routingStep<REVERSE_DIRECTION>(facade,
|
||||
new_reverse_heap,
|
||||
existing_forward_heap,
|
||||
s_v_middle,
|
||||
upper_bound_s_v_path_length,
|
||||
min_edge_offset,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS);
|
||||
}
|
||||
// compute path <v,..,t> by reusing backward search from node t
|
||||
NodeID v_t_middle = SPECIAL_NODEID;
|
||||
@ -195,14 +195,14 @@ void computeLengthAndSharingOfViaPath(
|
||||
new_forward_heap.Insert(via_node, 0, via_node);
|
||||
while (!new_forward_heap.Empty())
|
||||
{
|
||||
routingStep<FORWARD_DIRECTION>(facade,
|
||||
new_forward_heap,
|
||||
existing_reverse_heap,
|
||||
v_t_middle,
|
||||
upper_bound_of_v_t_path_length,
|
||||
min_edge_offset,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS);
|
||||
ch::routingStep<FORWARD_DIRECTION>(facade,
|
||||
new_forward_heap,
|
||||
existing_reverse_heap,
|
||||
v_t_middle,
|
||||
upper_bound_of_v_t_path_length,
|
||||
min_edge_offset,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS);
|
||||
}
|
||||
*real_length_of_via_path = upper_bound_s_v_path_length + upper_bound_of_v_t_path_length;
|
||||
|
||||
@ -212,9 +212,9 @@ void computeLengthAndSharingOfViaPath(
|
||||
}
|
||||
|
||||
// retrieve packed paths
|
||||
retrievePackedPathFromHeap(
|
||||
ch::retrievePackedPathFromHeap(
|
||||
existing_forward_heap, new_reverse_heap, s_v_middle, packed_s_v_path);
|
||||
retrievePackedPathFromHeap(
|
||||
ch::retrievePackedPathFromHeap(
|
||||
new_forward_heap, existing_reverse_heap, v_t_middle, packed_v_t_path);
|
||||
|
||||
// partial unpacking, compute sharing
|
||||
@ -234,14 +234,14 @@ void computeLengthAndSharingOfViaPath(
|
||||
{
|
||||
if (packed_s_v_path[current_node] == packed_shortest_path[current_node])
|
||||
{
|
||||
unpackEdge(facade,
|
||||
packed_s_v_path[current_node],
|
||||
packed_s_v_path[current_node + 1],
|
||||
partially_unpacked_via_path);
|
||||
unpackEdge(facade,
|
||||
packed_shortest_path[current_node],
|
||||
packed_shortest_path[current_node + 1],
|
||||
partially_unpacked_shortest_path);
|
||||
ch::unpackEdge(facade,
|
||||
packed_s_v_path[current_node],
|
||||
packed_s_v_path[current_node + 1],
|
||||
partially_unpacked_via_path);
|
||||
ch::unpackEdge(facade,
|
||||
packed_shortest_path[current_node],
|
||||
packed_shortest_path[current_node + 1],
|
||||
partially_unpacked_shortest_path);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -280,14 +280,14 @@ void computeLengthAndSharingOfViaPath(
|
||||
{
|
||||
if (packed_v_t_path[via_path_index] == packed_shortest_path[shortest_path_index])
|
||||
{
|
||||
unpackEdge(facade,
|
||||
packed_v_t_path[via_path_index - 1],
|
||||
packed_v_t_path[via_path_index],
|
||||
partially_unpacked_via_path);
|
||||
unpackEdge(facade,
|
||||
packed_shortest_path[shortest_path_index - 1],
|
||||
packed_shortest_path[shortest_path_index],
|
||||
partially_unpacked_shortest_path);
|
||||
ch::unpackEdge(facade,
|
||||
packed_v_t_path[via_path_index - 1],
|
||||
packed_v_t_path[via_path_index],
|
||||
partially_unpacked_via_path);
|
||||
ch::unpackEdge(facade,
|
||||
packed_shortest_path[shortest_path_index - 1],
|
||||
packed_shortest_path[shortest_path_index],
|
||||
partially_unpacked_shortest_path);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -342,14 +342,14 @@ bool viaNodeCandidatePassesTTest(
|
||||
new_reverse_heap.Insert(candidate.node, 0, candidate.node);
|
||||
while (new_reverse_heap.Size() > 0)
|
||||
{
|
||||
routingStep<REVERSE_DIRECTION>(facade,
|
||||
new_reverse_heap,
|
||||
existing_forward_heap,
|
||||
*s_v_middle,
|
||||
upper_bound_s_v_path_length,
|
||||
min_edge_offset,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS);
|
||||
ch::routingStep<REVERSE_DIRECTION>(facade,
|
||||
new_reverse_heap,
|
||||
existing_forward_heap,
|
||||
*s_v_middle,
|
||||
upper_bound_s_v_path_length,
|
||||
min_edge_offset,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS);
|
||||
}
|
||||
|
||||
if (INVALID_EDGE_WEIGHT == upper_bound_s_v_path_length)
|
||||
@ -363,14 +363,14 @@ bool viaNodeCandidatePassesTTest(
|
||||
new_forward_heap.Insert(candidate.node, 0, candidate.node);
|
||||
while (new_forward_heap.Size() > 0)
|
||||
{
|
||||
routingStep<FORWARD_DIRECTION>(facade,
|
||||
new_forward_heap,
|
||||
existing_reverse_heap,
|
||||
*v_t_middle,
|
||||
upper_bound_of_v_t_path_length,
|
||||
min_edge_offset,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS);
|
||||
ch::routingStep<FORWARD_DIRECTION>(facade,
|
||||
new_forward_heap,
|
||||
existing_reverse_heap,
|
||||
*v_t_middle,
|
||||
upper_bound_of_v_t_path_length,
|
||||
min_edge_offset,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS);
|
||||
}
|
||||
|
||||
if (INVALID_EDGE_WEIGHT == upper_bound_of_v_t_path_length)
|
||||
@ -381,10 +381,10 @@ bool viaNodeCandidatePassesTTest(
|
||||
*length_of_via_path = upper_bound_s_v_path_length + upper_bound_of_v_t_path_length;
|
||||
|
||||
// retrieve packed paths
|
||||
retrievePackedPathFromHeap(
|
||||
ch::retrievePackedPathFromHeap(
|
||||
existing_forward_heap, new_reverse_heap, *s_v_middle, packed_s_v_path);
|
||||
|
||||
retrievePackedPathFromHeap(
|
||||
ch::retrievePackedPathFromHeap(
|
||||
new_forward_heap, existing_reverse_heap, *v_t_middle, packed_v_t_path);
|
||||
|
||||
NodeID s_P = *s_v_middle, t_P = *v_t_middle;
|
||||
@ -434,7 +434,7 @@ bool viaNodeCandidatePassesTTest(
|
||||
const bool current_edge_is_shortcut = current_edge_data.shortcut;
|
||||
if (current_edge_is_shortcut)
|
||||
{
|
||||
const NodeID via_path_middle_node_id = current_edge_data.id;
|
||||
const NodeID via_path_middle_node_id = current_edge_data.turn_id;
|
||||
const EdgeID second_segment_edge_id =
|
||||
facade.FindEdgeInEitherDirection(via_path_middle_node_id, via_path_edge.second);
|
||||
const int second_segment_length = facade.GetEdgeData(second_segment_edge_id).weight;
|
||||
@ -496,7 +496,7 @@ bool viaNodeCandidatePassesTTest(
|
||||
const bool IsViaEdgeShortCut = current_edge_data.shortcut;
|
||||
if (IsViaEdgeShortCut)
|
||||
{
|
||||
const NodeID middleOfViaPath = current_edge_data.id;
|
||||
const NodeID middleOfViaPath = current_edge_data.turn_id;
|
||||
EdgeID edgeIDOfFirstSegment =
|
||||
facade.FindEdgeInEitherDirection(via_path_edge.first, middleOfViaPath);
|
||||
int lengthOfFirstSegment = facade.GetEdgeData(edgeIDOfFirstSegment).weight;
|
||||
@ -536,25 +536,25 @@ bool viaNodeCandidatePassesTTest(
|
||||
{
|
||||
if (!forward_heap3.Empty())
|
||||
{
|
||||
routingStep<FORWARD_DIRECTION>(facade,
|
||||
forward_heap3,
|
||||
reverse_heap3,
|
||||
middle,
|
||||
upper_bound,
|
||||
min_edge_offset,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS);
|
||||
ch::routingStep<FORWARD_DIRECTION>(facade,
|
||||
forward_heap3,
|
||||
reverse_heap3,
|
||||
middle,
|
||||
upper_bound,
|
||||
min_edge_offset,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS);
|
||||
}
|
||||
if (!reverse_heap3.Empty())
|
||||
{
|
||||
routingStep<REVERSE_DIRECTION>(facade,
|
||||
reverse_heap3,
|
||||
forward_heap3,
|
||||
middle,
|
||||
upper_bound,
|
||||
min_edge_offset,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS);
|
||||
ch::routingStep<REVERSE_DIRECTION>(facade,
|
||||
reverse_heap3,
|
||||
forward_heap3,
|
||||
middle,
|
||||
upper_bound,
|
||||
min_edge_offset,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS);
|
||||
}
|
||||
}
|
||||
return (upper_bound <= t_test_path_length);
|
||||
@ -593,35 +593,7 @@ alternativePathSearch(SearchEngineData &engine_working_data,
|
||||
? -phantom_node_pair.source_phantom.GetReverseWeightPlusOffset()
|
||||
: 0);
|
||||
|
||||
if (phantom_node_pair.source_phantom.forward_segment_id.enabled)
|
||||
{
|
||||
BOOST_ASSERT(phantom_node_pair.source_phantom.forward_segment_id.id != SPECIAL_SEGMENTID);
|
||||
forward_heap1.Insert(phantom_node_pair.source_phantom.forward_segment_id.id,
|
||||
-phantom_node_pair.source_phantom.GetForwardWeightPlusOffset(),
|
||||
phantom_node_pair.source_phantom.forward_segment_id.id);
|
||||
}
|
||||
if (phantom_node_pair.source_phantom.reverse_segment_id.enabled)
|
||||
{
|
||||
BOOST_ASSERT(phantom_node_pair.source_phantom.reverse_segment_id.id != SPECIAL_SEGMENTID);
|
||||
forward_heap1.Insert(phantom_node_pair.source_phantom.reverse_segment_id.id,
|
||||
-phantom_node_pair.source_phantom.GetReverseWeightPlusOffset(),
|
||||
phantom_node_pair.source_phantom.reverse_segment_id.id);
|
||||
}
|
||||
|
||||
if (phantom_node_pair.target_phantom.forward_segment_id.enabled)
|
||||
{
|
||||
BOOST_ASSERT(phantom_node_pair.target_phantom.forward_segment_id.id != SPECIAL_SEGMENTID);
|
||||
reverse_heap1.Insert(phantom_node_pair.target_phantom.forward_segment_id.id,
|
||||
phantom_node_pair.target_phantom.GetForwardWeightPlusOffset(),
|
||||
phantom_node_pair.target_phantom.forward_segment_id.id);
|
||||
}
|
||||
if (phantom_node_pair.target_phantom.reverse_segment_id.enabled)
|
||||
{
|
||||
BOOST_ASSERT(phantom_node_pair.target_phantom.reverse_segment_id.id != SPECIAL_SEGMENTID);
|
||||
reverse_heap1.Insert(phantom_node_pair.target_phantom.reverse_segment_id.id,
|
||||
phantom_node_pair.target_phantom.GetReverseWeightPlusOffset(),
|
||||
phantom_node_pair.target_phantom.reverse_segment_id.id);
|
||||
}
|
||||
insertNodesInHeaps(forward_heap1, reverse_heap1, phantom_node_pair);
|
||||
|
||||
// search from s and t till new_min/(1+epsilon) > length_of_shortest_path
|
||||
while (0 < (forward_heap1.Size() + reverse_heap1.Size()))
|
||||
@@ -674,8 +646,8 @@ alternativePathSearch(SearchEngineData &engine_working_data,
     else
     {
-        retrievePackedPathFromSingleHeap(forward_heap1, middle_node, packed_forward_path);
-        retrievePackedPathFromSingleHeap(reverse_heap1, middle_node, packed_reverse_path);
+        ch::retrievePackedPathFromSingleHeap(forward_heap1, middle_node, packed_forward_path);
+        ch::retrievePackedPathFromSingleHeap(reverse_heap1, middle_node, packed_reverse_path);
     }
 
     // this set is used as an indicator of whether a node is on the shortest path
@@ -827,14 +799,14 @@ alternativePathSearch(SearchEngineData &engine_working_data,
         raw_route_data.target_traversed_in_reverse.push_back((
             packed_shortest_path.back() != phantom_node_pair.target_phantom.forward_segment_id.id));
 
-        unpackPath(facade,
-                   // -- packed input
-                   packed_shortest_path.begin(),
-                   packed_shortest_path.end(),
-                   // -- start of route
-                   phantom_node_pair,
-                   // -- unpacked output
-                   raw_route_data.unpacked_path_segments.front());
+        ch::unpackPath(facade,
+                       // -- packed input
+                       packed_shortest_path.begin(),
+                       packed_shortest_path.end(),
+                       // -- start of route
+                       phantom_node_pair,
+                       // -- unpacked output
+                       raw_route_data.unpacked_path_segments.front());
         raw_route_data.shortest_path_length = upper_bound_to_shortest_path_weight;
     }
 
@@ -858,11 +830,11 @@ alternativePathSearch(SearchEngineData &engine_working_data,
             phantom_node_pair.target_phantom.forward_segment_id.id));
 
         // unpack the alternate path
-        unpackPath(facade,
-                   packed_alternate_path.begin(),
-                   packed_alternate_path.end(),
-                   phantom_node_pair,
-                   raw_route_data.unpacked_alternative);
+        ch::unpackPath(facade,
+                       packed_alternate_path.begin(),
+                       packed_alternate_path.end(),
+                       phantom_node_pair,
+                       raw_route_data.unpacked_alternative);
 
         raw_route_data.alternative_path_length = length_of_via_path;
     }
@@ -1,6 +1,8 @@
 #include "engine/routing_algorithms/direct_shortest_path.hpp"
 
 #include "engine/routing_algorithms/routing_base.hpp"
+#include "engine/routing_algorithms/routing_base_ch.hpp"
+#include "engine/routing_algorithms/routing_base_mld.hpp"
 
 namespace osrm
 {
@@ -9,50 +11,13 @@ namespace engine
 namespace routing_algorithms
 {
 
 namespace
 {
-void insertInHeaps(SearchEngineData::QueryHeap &forward_heap,
-                   SearchEngineData::QueryHeap &reverse_heap,
-                   const PhantomNodes &nodes)
-{
-    const auto &source_phantom = nodes.source_phantom;
-    const auto &target_phantom = nodes.target_phantom;
-    BOOST_ASSERT(source_phantom.IsValid());
-    BOOST_ASSERT(target_phantom.IsValid());
-
-    if (source_phantom.forward_segment_id.enabled)
-    {
-        forward_heap.Insert(source_phantom.forward_segment_id.id,
-                            -source_phantom.GetForwardWeightPlusOffset(),
-                            source_phantom.forward_segment_id.id);
-    }
-    if (source_phantom.reverse_segment_id.enabled)
-    {
-        forward_heap.Insert(source_phantom.reverse_segment_id.id,
-                            -source_phantom.GetReverseWeightPlusOffset(),
-                            source_phantom.reverse_segment_id.id);
-    }
-
-    if (target_phantom.forward_segment_id.enabled)
-    {
-        reverse_heap.Insert(target_phantom.forward_segment_id.id,
-                            target_phantom.GetForwardWeightPlusOffset(),
-                            target_phantom.forward_segment_id.id);
-    }
-
-    if (target_phantom.reverse_segment_id.enabled)
-    {
-        reverse_heap.Insert(target_phantom.reverse_segment_id.id,
-                            target_phantom.GetReverseWeightPlusOffset(),
-                            target_phantom.reverse_segment_id.id);
-    }
-}
-
 template <typename AlgorithmT>
 InternalRouteResult
 extractRoute(const datafacade::ContiguousInternalMemoryDataFacade<AlgorithmT> &facade,
              const EdgeWeight weight,
-             const std::vector<NodeID> &packed_leg,
+             const NodeID source_node,
+             const NodeID target_node,
+             const std::vector<EdgeID> &edges,
              const PhantomNodes &nodes)
 {
     InternalRouteResult raw_route_data;
@@ -65,25 +30,25 @@ extractRoute(const datafacade::ContiguousInternalMemoryDataFacade<AlgorithmT> &f
         return raw_route_data;
     }
 
-    BOOST_ASSERT_MSG(!packed_leg.empty(), "packed path empty");
-
     raw_route_data.shortest_path_length = weight;
     raw_route_data.unpacked_path_segments.resize(1);
     raw_route_data.source_traversed_in_reverse.push_back(
-        (packed_leg.front() != nodes.source_phantom.forward_segment_id.id));
+        (source_node != nodes.source_phantom.forward_segment_id.id));
     raw_route_data.target_traversed_in_reverse.push_back(
-        (packed_leg.back() != nodes.target_phantom.forward_segment_id.id));
+        (target_node != nodes.target_phantom.forward_segment_id.id));
 
-    unpackPath(facade,
-               packed_leg.begin(),
-               packed_leg.end(),
-               nodes,
-               raw_route_data.unpacked_path_segments.front());
+    annotatePath(facade,
+                 source_node,
+                 target_node,
+                 edges,
+                 nodes,
+                 raw_route_data.unpacked_path_segments.front());
 
     return raw_route_data;
 }
 }
 
 namespace ch
 {
 /// This is a stripped-down version of the general shortest path algorithm.
 /// The general algorithm always computes two queries for each leg. This is only
 /// necessary in the case of vias, where the direction of the start node is constrained
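
Note: with this change extractRoute no longer unpacks a CH-specific packed leg itself; it receives the already-unpacked original edges plus the path endpoints and only annotates them, which is what lets the CH and MLD specializations below share it. The common post-search shape, sketched with plumbing abbreviated:

    EdgeWeight weight;                  // INVALID_EDGE_WEIGHT if no route was found
    NodeID source_node, target_node;    // first and last node of the packed path
    std::vector<EdgeID> unpacked_edges; // original, non-shortcut edges along the path
    // ...filled either by ch::search plus unpackPath, or directly by mld::search...
    auto route = extractRoute(facade, weight, source_node, target_node,
                              unpacked_edges, phantom_nodes);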
@@ -107,9 +72,9 @@ InternalRouteResult directShortestPathSearchImpl(
     forward_core_heap.Clear();
     reverse_core_heap.Clear();
 
-    int weight = INVALID_EDGE_WEIGHT;
+    EdgeWeight weight = INVALID_EDGE_WEIGHT;
     std::vector<NodeID> packed_leg;
-    insertInHeaps(forward_heap, reverse_heap, phantom_nodes);
+    insertNodesInHeaps(forward_heap, reverse_heap, phantom_nodes);
 
     search(facade,
            forward_heap,
@@ -121,23 +86,67 @@ InternalRouteResult directShortestPathSearchImpl(
            DO_NOT_FORCE_LOOPS,
            DO_NOT_FORCE_LOOPS);
 
-    return extractRoute(facade, weight, packed_leg, phantom_nodes);
+    std::vector<EdgeID> unpacked_edges;
+    auto source_node = SPECIAL_NODEID, target_node = SPECIAL_NODEID;
+    if (!packed_leg.empty())
+    {
+        source_node = packed_leg.front();
+        target_node = packed_leg.back();
+        unpacked_edges.reserve(packed_leg.size());
+        unpackPath(
+            facade,
+            packed_leg.begin(),
+            packed_leg.end(),
+            [&facade, &unpacked_edges](std::pair<NodeID, NodeID> & /* edge */,
+                                       const auto &edge_id) { unpacked_edges.push_back(edge_id); });
+    }
+
+    return extractRoute(facade, weight, source_node, target_node, unpacked_edges, phantom_nodes);
 }
 
 } // namespace ch
 
 template <>
 InternalRouteResult directShortestPathSearch(
     SearchEngineData &engine_working_data,
     const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CoreCH> &facade,
     const PhantomNodes &phantom_nodes)
 {
-    return directShortestPathSearchImpl(engine_working_data, facade, phantom_nodes);
+    return ch::directShortestPathSearchImpl(engine_working_data, facade, phantom_nodes);
 }
 
 template <>
 InternalRouteResult directShortestPathSearch(
     SearchEngineData &engine_working_data,
     const datafacade::ContiguousInternalMemoryDataFacade<algorithm::CH> &facade,
     const PhantomNodes &phantom_nodes)
 {
-    return directShortestPathSearchImpl(engine_working_data, facade, phantom_nodes);
+    return ch::directShortestPathSearchImpl(engine_working_data, facade, phantom_nodes);
 }
 
+template <>
+InternalRouteResult directShortestPathSearch(
+    SearchEngineData &engine_working_data,
+    const datafacade::ContiguousInternalMemoryDataFacade<algorithm::MLD> &facade,
+    const PhantomNodes &phantom_nodes)
+{
+    engine_working_data.InitializeOrClearMultiLayerDijkstraThreadLocalStorage(
+        facade.GetNumberOfNodes());
+    auto &forward_heap = *(engine_working_data.mld_forward_heap);
+    auto &reverse_heap = *(engine_working_data.mld_reverse_heap);
+    forward_heap.Clear();
+    reverse_heap.Clear();
+    insertNodesInHeaps(forward_heap, reverse_heap, phantom_nodes);
+
+    // TODO: when structured bindings are allowed, change to
+    // auto [weight, source_node, target_node, unpacked_edges] = ...
+    EdgeWeight weight;
+    NodeID source_node, target_node;
+    std::vector<EdgeID> unpacked_edges;
+    std::tie(weight, source_node, target_node, unpacked_edges) =
+        mld::search(facade, forward_heap, reverse_heap, phantom_nodes);
+
+    return extractRoute(facade, weight, source_node, target_node, unpacked_edges, phantom_nodes);
+}
+
 } // namespace routing_algorithms
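
Note: the TODO above refers to C++17 structured bindings; once available, the four declarations and std::tie collapse into a single line. The anticipated form, using the same names as in the diff:

    auto [weight, source_node, target_node, unpacked_edges] =
        mld::search(facade, forward_heap, reverse_heap, phantom_nodes);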
@@ -1,5 +1,5 @@
 #include "engine/routing_algorithms/many_to_many.hpp"
-#include "engine/routing_algorithms/routing_base.hpp"
+#include "engine/routing_algorithms/routing_base_ch.hpp"
 
 #include <boost/assert.hpp>
 
@@ -101,14 +101,14 @@ void forwardRoutingStep(const datafacade::ContiguousInternalMemoryDataFacade<alg
             const EdgeWeight new_weight = source_weight + target_weight;
             if (new_weight < 0)
             {
-                const EdgeWeight loop_weight = getLoopWeight<false>(facade, node);
+                const EdgeWeight loop_weight = ch::getLoopWeight<false>(facade, node);
                 const EdgeWeight new_weight_with_loop = new_weight + loop_weight;
                 if (loop_weight != INVALID_EDGE_WEIGHT && new_weight_with_loop >= 0)
                 {
                     current_weight = std::min(current_weight, new_weight_with_loop);
                     current_duration = std::min(current_duration,
                                                 source_duration + target_duration +
-                                                    getLoopWeight<true>(facade, node));
+                                                    ch::getLoopWeight<true>(facade, node));
                 }
             }
             else if (new_weight < current_weight)
@@ -118,7 +118,7 @@ void forwardRoutingStep(const datafacade::ContiguousInternalMemoryDataFacade<alg
         }
     }
 
-    if (stallAtNode<FORWARD_DIRECTION>(facade, node, source_weight, query_heap))
+    if (ch::stallAtNode<FORWARD_DIRECTION>(facade, node, source_weight, query_heap))
     {
         return;
     }
@@ -139,7 +139,7 @@ void backwardRoutingStep(
     // store settled nodes in search space bucket
     search_space_with_buckets[node].emplace_back(column_idx, target_weight, target_duration);
 
-    if (stallAtNode<REVERSE_DIRECTION>(facade, node, target_weight, query_heap))
+    if (ch::stallAtNode<REVERSE_DIRECTION>(facade, node, target_weight, query_heap))
     {
         return;
     }
@@ -172,21 +172,9 @@ manyToManySearch(SearchEngineData &engine_working_data,
 
     unsigned column_idx = 0;
     const auto search_target_phantom = [&](const PhantomNode &phantom) {
         // clear heap and insert target nodes
         query_heap.Clear();
-        // insert target(s) at weight 0
-
-        if (phantom.forward_segment_id.enabled)
-        {
-            query_heap.Insert(phantom.forward_segment_id.id,
-                              phantom.GetForwardWeightPlusOffset(),
-                              {phantom.forward_segment_id.id, phantom.GetForwardDuration()});
-        }
-        if (phantom.reverse_segment_id.enabled)
-        {
-            query_heap.Insert(phantom.reverse_segment_id.id,
-                              phantom.GetReverseWeightPlusOffset(),
-                              {phantom.reverse_segment_id.id, phantom.GetReverseDuration()});
-        }
+        insertNodesInHeap<REVERSE_DIRECTION>(query_heap, phantom);
 
         // explore search space
         while (!query_heap.Empty())
@@ -199,21 +187,9 @@ manyToManySearch(SearchEngineData &engine_working_data,
 
     // for each source do forward search
     unsigned row_idx = 0;
     const auto search_source_phantom = [&](const PhantomNode &phantom) {
         // clear heap and insert source nodes
         query_heap.Clear();
-        // insert target(s) at weight 0
-
-        if (phantom.forward_segment_id.enabled)
-        {
-            query_heap.Insert(phantom.forward_segment_id.id,
-                              -phantom.GetForwardWeightPlusOffset(),
-                              {phantom.forward_segment_id.id, -phantom.GetForwardDuration()});
-        }
-        if (phantom.reverse_segment_id.enabled)
-        {
-            query_heap.Insert(phantom.reverse_segment_id.id,
-                              -phantom.GetReverseWeightPlusOffset(),
-                              {phantom.reverse_segment_id.id, -phantom.GetReverseDuration()});
-        }
+        insertNodesInHeap<FORWARD_DIRECTION>(query_heap, phantom);
 
         // explore search space
         while (!query_heap.Empty())
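
Note: both many-to-many lambdas now delegate heap seeding to a direction-tagged helper. A sketch of what insertNodesInHeap presumably does, reconstructed from the removed blocks (sources are seeded with negated weight/duration offsets, targets with positive ones; not the verbatim implementation):

    template <bool DIRECTION, typename Heap>
    void insertNodesInHeap(Heap &query_heap, const PhantomNode &phantom)
    {
        // FORWARD_DIRECTION seeds sources (negative offsets),
        // REVERSE_DIRECTION seeds targets (positive offsets).
        const int sign = (DIRECTION == FORWARD_DIRECTION) ? -1 : +1;
        if (phantom.forward_segment_id.enabled)
        {
            query_heap.Insert(phantom.forward_segment_id.id,
                              sign * phantom.GetForwardWeightPlusOffset(),
                              {phantom.forward_segment_id.id, sign * phantom.GetForwardDuration()});
        }
        if (phantom.reverse_segment_id.enabled)
        {
            query_heap.Insert(phantom.reverse_segment_id.id,
                              sign * phantom.GetReverseWeightPlusOffset(),
                              {phantom.reverse_segment_id.id, sign * phantom.GetReverseDuration()});
        }
    }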
@@ -1,5 +1,5 @@
 #include "engine/routing_algorithms/map_matching.hpp"
-#include "engine/routing_algorithms/routing_base.hpp"
+#include "engine/routing_algorithms/routing_base_ch.hpp"
 
 #include "engine/map_matching/hidden_markov_model.hpp"
 #include "engine/map_matching/matching_confidence.hpp"
@@ -210,14 +210,14 @@ mapMatchingImpl(SearchEngineData &engine_working_data,
         }
 
         double network_distance =
-            getNetworkDistance(facade,
-                               forward_heap,
-                               reverse_heap,
-                               forward_core_heap,
-                               reverse_core_heap,
-                               prev_unbroken_timestamps_list[s].phantom_node,
-                               current_timestamps_list[s_prime].phantom_node,
-                               duration_upper_bound);
+            ch::getNetworkDistance(facade,
+                                   forward_heap,
+                                   reverse_heap,
+                                   forward_core_heap,
+                                   reverse_core_heap,
+                                   prev_unbroken_timestamps_list[s].phantom_node,
+                                   current_timestamps_list[s_prime].phantom_node,
+                                   duration_upper_bound);
 
         // get distance diff between loc1/2 and locs/s_prime
         const auto d_t = std::abs(network_distance - haversine_distance);
@@ -1,4 +1,4 @@
-#include "engine/routing_algorithms/routing_base.hpp"
+#include "engine/routing_algorithms/routing_base_ch.hpp"
 
 namespace osrm
 {
@@ -6,6 +6,8 @@ namespace engine
 {
 namespace routing_algorithms
 {
+namespace ch
+{
 
 /**
  * Unpacks a single edge (NodeID->NodeID) from the CH graph down to its original non-shortcut
@@ -411,31 +413,7 @@ getNetworkDistance(const datafacade::ContiguousInternalMemoryDataFacade<algorith
     forward_core_heap.Clear();
     reverse_core_heap.Clear();
 
-    if (source_phantom.forward_segment_id.enabled)
-    {
-        forward_heap.Insert(source_phantom.forward_segment_id.id,
-                            -source_phantom.GetForwardWeightPlusOffset(),
-                            source_phantom.forward_segment_id.id);
-    }
-    if (source_phantom.reverse_segment_id.enabled)
-    {
-        forward_heap.Insert(source_phantom.reverse_segment_id.id,
-                            -source_phantom.GetReverseWeightPlusOffset(),
-                            source_phantom.reverse_segment_id.id);
-    }
-
-    if (target_phantom.forward_segment_id.enabled)
-    {
-        reverse_heap.Insert(target_phantom.forward_segment_id.id,
-                            target_phantom.GetForwardWeightPlusOffset(),
-                            target_phantom.forward_segment_id.id);
-    }
-    if (target_phantom.reverse_segment_id.enabled)
-    {
-        reverse_heap.Insert(target_phantom.reverse_segment_id.id,
-                            target_phantom.GetReverseWeightPlusOffset(),
-                            target_phantom.reverse_segment_id.id);
-    }
+    insertNodesInHeaps(forward_heap, reverse_heap, {source_phantom, target_phantom});
 
     EdgeWeight weight = INVALID_EDGE_WEIGHT;
     std::vector<NodeID> packed_path;
@@ -517,6 +495,7 @@ getNetworkDistance(const datafacade::ContiguousInternalMemoryDataFacade<algorith
     return getPathDistance(facade, packed_path, source_phantom, target_phantom);
 }
 
+} // namespace ch
 } // namespace routing_algorithms
 } // namespace engine
 } // namespace osrm
@@ -1,5 +1,5 @@
 #include "engine/routing_algorithms/shortest_path.hpp"
-#include "engine/routing_algorithms/routing_base.hpp"
+#include "engine/routing_algorithms/routing_base_ch.hpp"
 
 #include <boost/assert.hpp>
 #include <boost/optional.hpp>
@@ -71,23 +71,24 @@ void searchWithUTurn(const datafacade::ContiguousInternalMemoryDataFacade<Algori
     auto is_oneway_source = !(search_from_forward_node && search_from_reverse_node);
     auto is_oneway_target = !(search_to_forward_node && search_to_reverse_node);
     // we only enable loops here if we can't search from forward to backward node
-    auto needs_loop_forwad = is_oneway_source && needsLoopForward(source_phantom, target_phantom);
+    auto needs_loop_forwad =
+        is_oneway_source && ch::needsLoopForward(source_phantom, target_phantom);
     auto needs_loop_backwards =
-        is_oneway_target && needsLoopBackwards(source_phantom, target_phantom);
+        is_oneway_target && ch::needsLoopBackwards(source_phantom, target_phantom);
 
     forward_core_heap.Clear();
     reverse_core_heap.Clear();
     BOOST_ASSERT(forward_core_heap.Size() == 0);
     BOOST_ASSERT(reverse_core_heap.Size() == 0);
-    routing_algorithms::search(facade,
-                               forward_heap,
-                               reverse_heap,
-                               forward_core_heap,
-                               reverse_core_heap,
-                               new_total_weight,
-                               leg_packed_path,
-                               needs_loop_forwad,
-                               needs_loop_backwards);
+    ch::search(facade,
+               forward_heap,
+               reverse_heap,
+               forward_core_heap,
+               reverse_core_heap,
+               new_total_weight,
+               leg_packed_path,
+               needs_loop_forwad,
+               needs_loop_backwards);
 
     // if no route is found between two parts of the via-route, the entire route becomes
     // invalid. Adding to invalid edge weight sadly doesn't return an invalid edge weight. Here
@@ -147,15 +148,15 @@ void search(const datafacade::ContiguousInternalMemoryDataFacade<AlgorithmT> &fa
         reverse_core_heap.Clear();
         BOOST_ASSERT(forward_core_heap.Size() == 0);
         BOOST_ASSERT(reverse_core_heap.Size() == 0);
-        routing_algorithms::search(facade,
-                                   forward_heap,
-                                   reverse_heap,
-                                   forward_core_heap,
-                                   reverse_core_heap,
-                                   new_total_weight_to_forward,
-                                   leg_packed_path_forward,
-                                   needsLoopForward(source_phantom, target_phantom),
-                                   DO_NOT_FORCE_LOOP);
+        ch::search(facade,
+                   forward_heap,
+                   reverse_heap,
+                   forward_core_heap,
+                   reverse_core_heap,
+                   new_total_weight_to_forward,
+                   leg_packed_path_forward,
+                   ch::needsLoopForward(source_phantom, target_phantom),
+                   DO_NOT_FORCE_LOOP);
     }
 
     if (search_to_reverse_node)
@@ -185,15 +186,15 @@ void search(const datafacade::ContiguousInternalMemoryDataFacade<AlgorithmT> &fa
         reverse_core_heap.Clear();
         BOOST_ASSERT(forward_core_heap.Size() == 0);
         BOOST_ASSERT(reverse_core_heap.Size() == 0);
-        routing_algorithms::search(facade,
-                                   forward_heap,
-                                   reverse_heap,
-                                   forward_core_heap,
-                                   reverse_core_heap,
-                                   new_total_weight_to_reverse,
-                                   leg_packed_path_reverse,
-                                   DO_NOT_FORCE_LOOP,
-                                   needsLoopBackwards(source_phantom, target_phantom));
+        ch::search(facade,
+                   forward_heap,
+                   reverse_heap,
+                   forward_core_heap,
+                   reverse_core_heap,
+                   new_total_weight_to_reverse,
+                   leg_packed_path_reverse,
+                   DO_NOT_FORCE_LOOP,
+                   ch::needsLoopBackwards(source_phantom, target_phantom));
     }
 }
 
@@ -213,11 +214,11 @@ void unpackLegs(const datafacade::ContiguousInternalMemoryDataFacade<algorithm::
         auto leg_begin = total_packed_path.begin() + packed_leg_begin[current_leg];
         auto leg_end = total_packed_path.begin() + packed_leg_begin[current_leg + 1];
         const auto &unpack_phantom_node_pair = phantom_nodes_vector[current_leg];
-        unpackPath(facade,
-                   leg_begin,
-                   leg_end,
-                   unpack_phantom_node_pair,
-                   raw_route_data.unpacked_path_segments[current_leg]);
+        ch::unpackPath(facade,
+                       leg_begin,
+                       leg_end,
+                       unpack_phantom_node_pair,
+                       raw_route_data.unpacked_path_segments[current_leg]);
 
         raw_route_data.source_traversed_in_reverse.push_back(
             (*leg_begin != phantom_nodes_vector[current_leg].source_phantom.forward_segment_id.id));
@@ -15,6 +15,9 @@ SearchEngineData::SearchEngineHeapPtr SearchEngineData::forward_heap_3;
 SearchEngineData::SearchEngineHeapPtr SearchEngineData::reverse_heap_3;
 SearchEngineData::ManyToManyHeapPtr SearchEngineData::many_to_many_heap;
 
+SearchEngineData::MultiLayerDijkstraHeapPtr SearchEngineData::mld_forward_heap;
+SearchEngineData::MultiLayerDijkstraHeapPtr SearchEngineData::mld_reverse_heap;
+
 void SearchEngineData::InitializeOrClearFirstThreadLocalStorage(const unsigned number_of_nodes)
 {
     if (forward_heap_1.get())
@@ -89,5 +92,27 @@ void SearchEngineData::InitializeOrClearManyToManyThreadLocalStorage(const unsig
         many_to_many_heap.reset(new ManyToManyQueryHeap(number_of_nodes));
     }
 }
+
+void SearchEngineData::InitializeOrClearMultiLayerDijkstraThreadLocalStorage(
+    const unsigned number_of_nodes)
+{
+    if (mld_forward_heap.get())
+    {
+        mld_forward_heap->Clear();
+    }
+    else
+    {
+        mld_forward_heap.reset(new MultiLayerDijkstraHeap(number_of_nodes));
+    }
+
+    if (mld_reverse_heap.get())
+    {
+        mld_reverse_heap->Clear();
+    }
+    else
+    {
+        mld_reverse_heap.reset(new MultiLayerDijkstraHeap(number_of_nodes));
+    }
+}
 }
 }
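
Note: the new MLD storage follows the same clear-or-allocate idiom as the existing thread-local heaps: reuse the allocation across queries on a thread, allocate only on first use. The idiom in generic form (a sketch, not code from this commit):

    template <typename Heap, typename HeapPtr>
    void initializeOrClear(HeapPtr &heap, const unsigned number_of_nodes)
    {
        if (heap.get())
            heap->Clear();                         // keep the allocation, drop the contents
        else
            heap.reset(new Heap(number_of_nodes)); // first query on this thread
    }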
@@ -107,7 +107,7 @@ parseArguments(int argc, char *argv[], customizer::CustomizationConfig &customiz
     if (!option_variables.count("input"))
     {
         std::cout << visible_options;
-        return return_code::exit;
+        return return_code::fail;
     }
 
     return return_code::ok;

@@ -116,7 +116,7 @@ return_code parseArguments(int argc, char *argv[], partition::PartitionConfig &p
     if (!option_variables.count("input"))
     {
         std::cout << visible_options;
-        return return_code::exit;
+        return return_code::fail;
    }
 
     return return_code::ok;
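
Note: returning fail instead of exit matters because the tools derive their process exit status from the parse result; a missing required <input> now terminates with a non-zero status instead of a clean one. A hypothetical call site illustrating the mapping (the actual main() is not part of this diff):

    #include <cstdlib>

    enum class return_code { ok, fail, exit }; // shape assumed from the diff

    int run(return_code parse_result)
    {
        switch (parse_result)
        {
        case return_code::ok:
            break;               // proceed with the actual work
        case return_code::exit:
            return EXIT_SUCCESS; // informational exit, e.g. --help
        case return_code::fail:
            return EXIT_FAILURE; // usage error, e.g. missing <input>
        }
        // ... run the tool ...
        return EXIT_SUCCESS;
    }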
@@ -93,8 +93,8 @@ void checkWeightsConsistency(
 
     for (auto &edge : edge_based_edge_list)
     {
-        BOOST_ASSERT(edge.data.edge_id < current_edge_data.size());
-        auto geometry_id = current_edge_data[edge.data.edge_id].via_geometry;
+        BOOST_ASSERT(edge.data.turn_id < current_edge_data.size());
+        auto geometry_id = current_edge_data[edge.data.turn_id].via_geometry;
 
         if (geometry_id.forward)
         {
@@ -102,7 +102,7 @@ void checkWeightsConsistency(
             EdgeWeight weight = std::accumulate(range.begin(), range.end(), EdgeWeight{0});
             if (weight > edge.data.weight)
             {
-                util::Log(logWARNING) << geometry_id.id << " vs " << edge.data.edge_id << ":"
+                util::Log(logWARNING) << geometry_id.id << " vs " << edge.data.turn_id << ":"
                                       << weight << " > " << edge.data.weight;
             }
         }
@@ -112,7 +112,7 @@ void checkWeightsConsistency(
             EdgeWeight weight = std::accumulate(range.begin(), range.end(), EdgeWeight{0});
             if (weight > edge.data.weight)
             {
-                util::Log(logWARNING) << geometry_id.id << " vs " << edge.data.edge_id << ":"
+                util::Log(logWARNING) << geometry_id.id << " vs " << edge.data.turn_id << ":"
                                       << weight << " > " << edge.data.weight;
             }
         }
@@ -583,7 +583,7 @@ Updater::LoadAndUpdateEdgeExpandedGraph(std::vector<extractor::EdgeBasedEdge> &e
     });
 
     const auto update_edge = [&](extractor::EdgeBasedEdge &edge) {
-        const auto geometry_id = edge_data[edge.data.edge_id].via_geometry;
+        const auto geometry_id = edge_data[edge.data.turn_id].via_geometry;
         auto updated_iter = std::lower_bound(updated_segments.begin(),
                                              updated_segments.end(),
                                              geometry_id,
@@ -619,8 +619,8 @@ Updater::LoadAndUpdateEdgeExpandedGraph(std::vector<extractor::EdgeBasedEdge> &e
         }
 
         // Get the turn penalty and update to the new value if required
-        auto turn_weight_penalty = turn_weight_penalties[edge.data.edge_id];
-        auto turn_duration_penalty = turn_duration_penalties[edge.data.edge_id];
+        auto turn_weight_penalty = turn_weight_penalties[edge.data.turn_id];
+        auto turn_duration_penalty = turn_duration_penalties[edge.data.turn_id];
         const auto num_nodes = segment_data.GetForwardGeometry(geometry_id.id).size();
         const auto weight_min_value = static_cast<EdgeWeight>(num_nodes);
         if (turn_weight_penalty + new_weight < weight_min_value)
@@ -631,7 +631,7 @@ Updater::LoadAndUpdateEdgeExpandedGraph(std::vector<extractor::EdgeBasedEdge> &e
                                       << " is too negative: clamping turn weight to "
                                       << weight_min_value;
             turn_weight_penalty = weight_min_value - new_weight;
-            turn_weight_penalties[edge.data.edge_id] = turn_weight_penalty;
+            turn_weight_penalties[edge.data.turn_id] = turn_weight_penalty;
         }
         else
         {
@@ -35,21 +35,21 @@ $(DATA_NAME)_%.osrm: $(DATA_NAME)_%.osm.pbf $(DATA_NAME).poly $(PROFILE) $(OSRM_
 	@echo "Verifying data file integrity..."
 	$(MD5SUM) -c data.md5sum
 	@echo "Running osrm-extract..."
-	$(TIMER) "osrm-extract" $(OSRM_EXTRACT) $< -p $(PROFILE)
+	$(TIMER) "osrm-extract\t$@" $(OSRM_EXTRACT) $< -p $(PROFILE)
 
 $(DATA_NAME)_CH.osrm.hsgr: $(DATA_NAME)_CH.osrm $(PROFILE) $(OSRM_CONTRACT)
 	@echo "Running osrm-contract..."
-	$(TIMER) "osrm-contract" $(OSRM_CONTRACT) $<
+	$(TIMER) "osrm-contract\t$@" $(OSRM_CONTRACT) $<
 
 $(DATA_NAME)_CoreCH.osrm.hsgr: $(DATA_NAME)_CoreCH.osrm $(PROFILE) $(OSRM_CONTRACT)
 	@echo "Running osrm-contract..."
-	$(TIMER) "osrm-contract" $(OSRM_CONTRACT) --core=0.95 $<
+	$(TIMER) "osrm-contract\t$@" $(OSRM_CONTRACT) --core=0.95 $<
 
 $(DATA_NAME)_MLD.osrm.partition: $(DATA_NAME)_MLD.osrm $(PROFILE) $(OSRM_PARTITION)
 	@echo "Running osrm-partition..."
-	$(TIMER) "osrm-contract" $(OSRM_CONTRACT) $<
-	$(TIMER) "osrm-partition" $(OSRM_PARTITION) $<
-	$(TIMER) "osrm-customize" $(OSRM_CUSTOMIZE) $<
+	$(TIMER) "osrm-contract\t$@" $(OSRM_CONTRACT) $<
+	$(TIMER) "osrm-partition\t$@" $(OSRM_PARTITION) $<
+	$(TIMER) "osrm-customize\t$@" $(OSRM_CUSTOMIZE) $<
 
 $(DATA_NAME).requests: $(DATA_NAME).poly
 	$(POLY2REQ) $(DATA_NAME).poly > $(DATA_NAME).requests
@@ -58,17 +58,17 @@ benchmark: data $(DATA_NAME).requests
 	@echo "Running benchmark..."
 	@/bin/sh -c '$(OSRM_ROUTED) --algorithm=CH $(DATA_NAME)_CH.osrm > /dev/null & echo "$$!" > osrm-routed.pid'
 	@sleep 1
-	$(TIMER) "queries" "cat $(DATA_NAME).requests | xargs curl &> /dev/null"
+	$(TIMER) "queries\tCH" "cat $(DATA_NAME).requests | xargs curl &> /dev/null"
 	@cat osrm-routed.pid | xargs kill
 	@rm osrm-routed.pid
 	@/bin/sh -c '$(OSRM_ROUTED) --algorithm=CoreCH $(DATA_NAME)_CoreCH.osrm > /dev/null & echo "$$!" > osrm-routed.pid'
 	@sleep 1
-	$(TIMER) "queries" "cat $(DATA_NAME).requests | xargs curl &> /dev/null"
+	$(TIMER) "queries\tCoreCH" "cat $(DATA_NAME).requests | xargs curl &> /dev/null"
 	@cat osrm-routed.pid | xargs kill
 	@rm osrm-routed.pid
 	@/bin/sh -c '$(OSRM_ROUTED) --algorithm=MLD $(DATA_NAME)_MLD.osrm > /dev/null & echo "$$!" > osrm-routed.pid'
 	@sleep 1
-	$(TIMER) "queries" "cat $(DATA_NAME).requests | xargs curl &> /dev/null"
+	$(TIMER) "queries\tMLD" "cat $(DATA_NAME).requests | xargs curl &> /dev/null"
 	@cat osrm-routed.pid | xargs kill
 	@rm osrm-routed.pid
 	@echo "**** timings ***"