Merge branch 'develop'

commit 300d901618
Patrick Niklaus, 2015-06-22 13:06:50 +02:00
91 changed files with 2540 additions and 2267 deletions

.gitignore (4 changes)

@@ -1,3 +1,7 @@
+# pre compiled dependencies #
+#############################
+osrm-deps
 # Compiled source #
 ###################
 *.com


@@ -30,6 +30,7 @@ script:
   - make
   - make tests
   - make benchmarks
+  - ./algorithm-tests
   - ./datastructure-tests
   - cd ..
   - cucumber -p verify

@@ -58,7 +59,7 @@ notifications:
     skip_join: false
     recipients:
-      - dennis@mapbox.com
+      - patrick@mapbox.com
   email:
     on_success: change
     on_failure: always


@@ -90,7 +90,7 @@ set(
 add_library(COORDINATE OBJECT ${CoordinateGlob})
 add_library(GITDESCRIPTION OBJECT util/git_sha.cpp)
-add_library(OSRM ${OSRMSources} $<TARGET_OBJECTS:ANGLE> $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:GITDESCRIPTION> $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:PHANTOMNODE> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR>)
+add_library(OSRM ${OSRMSources} $<TARGET_OBJECTS:ANGLE> $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:GITDESCRIPTION> $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:PHANTOMNODE> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR> $<TARGET_OBJECTS:IMPORT>)
 add_library(FINGERPRINT OBJECT util/fingerprint.cpp)
 add_dependencies(FINGERPRINT FingerPrintConfigure)

@@ -328,12 +328,10 @@ if(WITH_TOOLS OR BUILD_TOOLS)
 if(UNIX AND NOT APPLE)
 target_link_libraries(osrm-unlock-all rt)
 endif()
-add_executable(osrm-check-hsgr tools/check-hsgr.cpp $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:LOGGER>)
-target_link_libraries(osrm-check-hsgr ${Boost_LIBRARIES})
+add_executable(osrm-check-hsgr tools/check-hsgr.cpp $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:IMPORT>)
+target_link_libraries(osrm-check-hsgr ${Boost_LIBRARIES} ${TBB_LIBRARIES})
 add_executable(osrm-springclean tools/springclean.cpp $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:GITDESCRIPTION> $<TARGET_OBJECTS:EXCEPTION>)
 target_link_libraries(osrm-springclean ${Boost_LIBRARIES})
-add_executable(osrm-graph-compare tools/graph_compare.cpp $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:IMPORT> $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:RESTRICTION> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR>)
-target_link_libraries(osrm-graph-compare ${Boost_LIBRARIES} ${TBB_LIBRARIES})
 install(TARGETS osrm-cli DESTINATION bin)
 install(TARGETS osrm-io-benchmark DESTINATION bin)


@@ -2,22 +2,27 @@ GEM
   remote: http://rubygems.org/
   specs:
     builder (3.2.2)
-    cucumber (1.3.8)
+    cucumber (2.0.0)
       builder (>= 2.1.2)
+      cucumber-core (~> 1.1.3)
       diff-lcs (>= 1.1.3)
-      gherkin (~> 2.12.1)
+      gherkin (~> 2.12)
       multi_json (>= 1.7.5, < 2.0)
-      multi_test (>= 0.0.2)
+      multi_test (>= 0.1.2)
-    diff-lcs (1.2.4)
-    gherkin (2.12.1)
+    cucumber-core (1.1.3)
+      gherkin (~> 2.12.0)
+    diff-lcs (1.2.5)
+    gherkin (2.12.2)
       multi_json (~> 1.3)
-    multi_json (1.8.0)
-    multi_test (0.0.2)
+    multi_json (1.11.0)
+    multi_test (0.1.2)
     osmlib-base (0.1.4)
-    rake (10.1.0)
-    rspec-expectations (2.14.3)
-      diff-lcs (>= 1.1.3, < 2.0)
-    sys-proctable (0.9.3)
+    rake (10.4.2)
+    rspec-expectations (3.2.1)
+      diff-lcs (>= 1.2.0, < 2.0)
+      rspec-support (~> 3.2.0)
+    rspec-support (3.2.2)
+    sys-proctable (0.9.8)

 PLATFORMS
   ruby


@@ -1,12 +1,25 @@
-# Readme
-For instructions on how to compile and run OSRM, please consult the Wiki at
-https://github.com/Project-OSRM/osrm-backend/wiki
-or use our free and daily updated online service at
-http://map.project-osrm.org
+## About
+The Open Source Routing Machine is a high performance routing engine written in C++11 designed to run on OpenStreetMap data.
+## Current build status
+| build config | branch | status |
+|:-------------|:--------|:------------|
+| Linux | master | [![Build Status](https://travis-ci.org/Project-OSRM/osrm-backend.png?branch=master)](https://travis-ci.org/Project-OSRM/osrm-backend) |
+| Linux | develop | [![Build Status](https://travis-ci.org/Project-OSRM/osrm-backend.png?branch=develop)](https://travis-ci.org/Project-OSRM/osrm-backend) |
+| Windows | master/develop | [![Build status](https://ci.appveyor.com/api/projects/status/4iuo3s9gxprmcjjh)](https://ci.appveyor.com/project/DennisOSRM/osrm-backend) |
+| LUAbind fork | master | [![Build Status](https://travis-ci.org/DennisOSRM/luabind.png?branch=master)](https://travis-ci.org/DennisOSRM/luabind) |
+## Building
+For instructions on how to [build](https://github.com/Project-OSRM/osrm-backend/wiki/Building-OSRM) and [run OSRM](https://github.com/Project-OSRM/osrm-backend/wiki/Running-OSRM), please consult [the Wiki](https://github.com/Project-OSRM/osrm-backend/wiki).
+To quickly try OSRM use our [free and daily updated online service](http://map.project-osrm.org)
+## Documentation
+See the Wiki's [server API documentation](https://github.com/Project-OSRM/osrm-backend/wiki/Server-api) as well as the [library API documentation](https://github.com/Project-OSRM/osrm-backend/wiki/Library-api)
 ## References in publications

@@ -31,11 +44,3 @@ When using the code in a (scientific) publication, please cite
 }
 ```
-## Current build status
-| build config | branch | status |
-|:-------------|:--------|:------------|
-| Linux | master | [![Build Status](https://travis-ci.org/Project-OSRM/osrm-backend.png?branch=master)](https://travis-ci.org/DennisOSRM/Project-OSRM) |
-| Linux | develop | [![Build Status](https://travis-ci.org/Project-OSRM/osrm-backend.png?branch=develop)](https://travis-ci.org/DennisOSRM/Project-OSRM) |
-| Windows | master/develop | [![Build status](https://ci.appveyor.com/api/projects/status/4iuo3s9gxprmcjjh)](https://ci.appveyor.com/project/DennisOSRM/osrm-backend) |
-| LUAbind fork | master | [![Build Status](https://travis-ci.org/DennisOSRM/luabind.png?branch=master)](https://travis-ci.org/DennisOSRM/luabind) |


@@ -88,3 +88,41 @@ PolylineCompressor::get_encoded_string(const std::vector<SegmentInformation> &po
     }
     return encode_vector(delta_numbers);
 }
+
+std::vector<FixedPointCoordinate> PolylineCompressor::decode_string(const std::string &geometry_string) const
+{
+    std::vector<FixedPointCoordinate> new_coordinates;
+    int index = 0, len = geometry_string.size();
+    int lat = 0, lng = 0;
+
+    while (index < len)
+    {
+        int b, shift = 0, result = 0;
+        do
+        {
+            b = geometry_string.at(index++) - 63;
+            result |= (b & 0x1f) << shift;
+            shift += 5;
+        } while (b >= 0x20);
+        int dlat = ((result & 1) != 0 ? ~(result >> 1) : (result >> 1));
+        lat += dlat;
+
+        shift = 0;
+        result = 0;
+        do
+        {
+            b = geometry_string.at(index++) - 63;
+            result |= (b & 0x1f) << shift;
+            shift += 5;
+        } while (b >= 0x20);
+        int dlng = ((result & 1) != 0 ? ~(result >> 1) : (result >> 1));
+        lng += dlng;
+
+        FixedPointCoordinate p;
+        p.lat = COORDINATE_PRECISION * (((double) lat / 1E6));
+        p.lon = COORDINATE_PRECISION * (((double) lng / 1E6));
+        new_coordinates.push_back(p);
+    }
+
+    return new_coordinates;
+}
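
The decoder added above reverses the delta + zigzag + 5-bit varint scheme (ASCII offset 63) used by get_encoded_string(). Purely for orientation, and not part of this commit, here is a minimal C++ sketch of how the new decode_string() could be exercised; the include path and the COORDINATE_PRECISION scaling back to degrees are assumptions based on the headers shown in this diff.

```cpp
// Hypothetical usage sketch, not part of the commit. Assumes the OSRM headers
// shown in this diff (polyline_compressor.hpp pulls in <osrm/coordinate.hpp>).
#include "algorithms/polyline_compressor.hpp" // assumed include path
#include <iostream>
#include <string>
#include <vector>

void dump_decoded_route_geometry(const std::string &encoded_polyline)
{
    PolylineCompressor compressor;

    // decode_string() undoes the per-coordinate delta encoding, the zigzag sign
    // folding and the 5-bit varint chunks applied by get_encoded_string().
    const std::vector<FixedPointCoordinate> coordinates =
        compressor.decode_string(encoded_polyline);

    for (const FixedPointCoordinate &coordinate : coordinates)
    {
        // lat/lon are fixed-point integers scaled by COORDINATE_PRECISION
        std::cout << coordinate.lat / static_cast<double>(COORDINATE_PRECISION) << ","
                  << coordinate.lon / static_cast<double>(COORDINATE_PRECISION) << "\n";
    }
}
```

Round-tripping the output of get_encoded_string() through decode_string() should reproduce the input coordinates up to the fixed-point precision.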


@@ -30,6 +30,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 struct SegmentInformation;

+#include <osrm/coordinate.hpp>
+
 #include <string>
 #include <vector>

@@ -42,6 +44,8 @@ class PolylineCompressor
   public:
     std::string get_encoded_string(const std::vector<SegmentInformation> &polyline) const;
+
+    std::vector<FixedPointCoordinate> decode_string(const std::string &geometry_string) const;
 };

 #endif /* POLYLINECOMPRESSOR_H_ */


@@ -150,7 +150,7 @@ template <typename GraphT> class TarjanSCC
                 // Traverse outgoing edges
                 if (barrier_node_set.find(v) != barrier_node_set.end() && u != vprime)
                 {
-                    // continue;
+                    continue;
                 }
                 if (to_node_of_only_restriction != std::numeric_limits<unsigned>::max() &&

appveyor-build.bat (new file, 113 lines)

@@ -0,0 +1,113 @@
@ECHO OFF
SETLOCAL
SET EL=0
ECHO platform^: %platform%
:: HARDCODE "x64" as it is uppercase on AppVeyor and download from S3 is case sensitive
SET DEPSPKG=osrm-deps-win-x64-14.0.7z
:: local development
IF "%computername%"=="MB" GOTO SKIPDL
IF EXIST %DEPSPKG% DEL %DEPSPKG%
IF %ERRORLEVEL% NEQ 0 GOTO ERROR
ECHO downloading %DEPSPKG%
powershell Invoke-WebRequest https://mapbox.s3.amazonaws.com/windows-builds/windows-deps/$env:DEPSPKG -OutFile C:\projects\osrm\$env:DEPSPKG
IF %ERRORLEVEL% NEQ 0 GOTO ERROR
7z -y x %DEPSPKG% | %windir%\system32\FIND "ing archive"
IF %ERRORLEVEL% NEQ 0 GOTO ERROR
:SKIPDL
IF EXIST build rd /s /q build
IF %ERRORLEVEL% NEQ 0 GOTO ERROR
mkdir build
IF %ERRORLEVEL% NEQ 0 GOTO ERROR
cd build
IF %ERRORLEVEL% NEQ 0 GOTO ERROR
SET OSRMDEPSDIR=c:\projects\osrm\osrm-deps
set PREFIX=%OSRMDEPSDIR%/libs
set BOOST_ROOT=%OSRMDEPSDIR%/boost
set TBB_INSTALL_DIR=%OSRMDEPSDIR%/tbb
set TBB_ARCH_PLATFORM=intel64/vc14
ECHO calling cmake ....
cmake .. ^
-G "Visual Studio 14 Win64" ^
-DBOOST_ROOT=%BOOST_ROOT% ^
-DBoost_ADDITIONAL_VERSIONS=1.57 ^
-DBoost_USE_MULTITHREADED=ON ^
-DBoost_USE_STATIC_LIBS=ON ^
-DCMAKE_BUILD_TYPE=%CONFIGURATION% ^
-DCMAKE_INSTALL_PREFIX=%PREFIX%
IF %ERRORLEVEL% NEQ 0 GOTO ERROR
ECHO building ...
msbuild OSRM.sln ^
/p:Configuration=%Configuration% ^
/p:Platform=x64 ^
/t:rebuild ^
/p:BuildInParallel=true ^
/m:%NUMBER_OF_PROCESSORS% ^
/toolsversion:14.0 ^
/p:PlatformToolset=v140 ^
/clp:Verbosity=normal ^
/nologo ^
/flp1:logfile=build_errors.txt;errorsonly ^
/flp2:logfile=build_warnings.txt;warningsonly
IF %ERRORLEVEL% NEQ 0 GOTO ERROR
ECHO ========= TODO^: CREATE PACKAGES ==========
CD c:\projects\osrm\build\%Configuration%
IF %ERRORLEVEL% NEQ 0 GOTO ERROR
SET PATH=c:\projects\osrm\osrm-deps\libs\bin;%PATH%
ECHO running datastructure-tests.exe ...
datastructure-tests.exe
IF %ERRORLEVEL% NEQ 0 GOTO ERROR
ECHO running algorithm-tests.exe ...
algorithm-tests.exe
IF %ERRORLEVEL% NEQ 0 GOTO ERROR
GOTO DONE
:ERROR
SET EL=%ERRORLEVEL%
ECHO ============== ERROR ===============
:DONE
ECHO ============= DONE ===============
CD C:\projects\osrm
EXIT /b %EL%
- cd c:/projects/osrm
- mkdir build
- cd build
- echo Running cmake...
- call "%VS120COMNTOOLS%\..\..\VC\vcvarsall.bat" x86_amd64
- SET PATH=C:\Program Files (x86)\MSBuild\12.0\bin\;%PATH%
- SET P=c:/projects/osrm
- set TBB_INSTALL_DIR=%P%/tbb
- set TBB_ARCH_PLATFORM=intel64/vc12
- cmake .. -G "Visual Studio 14 Win64" -DCMAKE_BUILD_TYPE=%Configuration% -DCMAKE_INSTALL_PREFIX=%P%/libs -DBOOST_ROOT=%P%/boost_min -DBoost_ADDITIONAL_VERSIONS=1.57 -DBoost_USE_STATIC_LIBS=ON
- SET PLATFORM_TOOLSET=v140
- SET TOOLS_VERSION=14.0
- msbuild /p:Platform=x64 /clp:Verbosity=minimal /toolsversion:%TOOLS_VERSION% /p:PlatformToolset=%PLATFORM_TOOLSET% /nologo OSRM.sln
- msbuild /p:Platform=x64 /clp:Verbosity=minimal /toolsversion:%TOOLS_VERSION% /p:PlatformToolset=%PLATFORM_TOOLSET% /nologo tests.vcxproj
- cd %Configuration%
- if "%APPVEYOR_REPO_BRANCH%"=="develop" (7z a %P%/osrm_%Configuration%.zip *.exe *.pdb %P%/libs/bin/*.dll -tzip)
- cd ..\..\profiles
- echo disk=c:\temp\stxxl,10000,wincall > .stxxl.txt
- if "%APPVEYOR_REPO_BRANCH%"=="develop" (7z a %P%/osrm_%Configuration%.zip * -tzip)
- set PATH=%PATH%;c:/projects/osrm/libs/bin
- cd c:/projects/osrm/build/%Configuration%
- datastructure-tests.exe
- algorithm-tests.exe


@@ -1,60 +1,33 @@
 environment:
   matrix:
-  - configuration: Debug
   - configuration: Release
+# - configuration: Debug

-# branches to build
-branches:
-  # whitelist
-  only:
-    - develop

 # scripts that are called at very beginning, before repo cloning
 init:
   - git config --global core.autocrlf input

+os: Visual Studio 2015 RC

 # clone directory
 clone_folder: c:\projects\osrm

 platform: x64

 install:
-  # by default, all script lines are interpreted as batch
-  - nuget install protobuf
-  - cd c:\projects\osrm
-  - curl -O http://build.project-osrm.org/libs_osrm_%Configuration%.7z
-  - 7z x libs_osrm_%Configuration%.7z | find ":"
+  - set PATH=C:\Program Files (x86)\MSBuild\14.0\Bin;%PATH%
+  - CALL "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd64

 build_script:
-  - cd c:/projects/osrm
-  - mkdir build
-  - cd build
-  - echo Running cmake...
-  - call "%VS120COMNTOOLS%\..\..\VC\vcvarsall.bat" x86_amd64
-  - SET PATH=C:\Program Files (x86)\MSBuild\12.0\bin\;%PATH%
-  - SET P=c:/projects/osrm
-  - set TBB_INSTALL_DIR=%P%/tbb
-  - set TBB_ARCH_PLATFORM=intel64/vc12
-  - cmake .. -G "Visual Studio 12 Win64" -DCMAKE_BUILD_TYPE=%Configuration% -DCMAKE_INSTALL_PREFIX=%P%/libs -DBOOST_ROOT=%P%/boost_min -DBoost_ADDITIONAL_VERSIONS=1.57 -DBoost_USE_STATIC_LIBS=ON -T CTP_Nov2013
-  - msbuild /clp:Verbosity=minimal /nologo OSRM.sln
-  - msbuild /clp:Verbosity=minimal /nologo tests.vcxproj
-  - cd %Configuration%
-  - if "%APPVEYOR_REPO_BRANCH%"=="develop" (7z a %P%/osrm_%Configuration%.zip *.exe *.pdb %P%/libs/bin/*.dll -tzip)
-  - cd ..\..\profiles
-  - echo disk=c:\temp\stxxl,10000,wincall > .stxxl.txt
-  - if "%APPVEYOR_REPO_BRANCH%"=="develop" (7z a %P%/osrm_%Configuration%.zip * -tzip)
-  - set PATH=%PATH%;c:/projects/osrm/libs/bin
-  - cd c:/projects/osrm/build/%Configuration%
-  - datastructure-tests.exe
-  - algorithm-tests.exe
+  - CALL appveyor-build.bat

 test: off

 artifacts:
-  - path: osrm_Debug.zip
-    name: osrm_Debug.zip
   - path: osrm_Release.zip
     name: osrm_Release.zip
+# - path: osrm_Debug.zip
+#   name: osrm_Debug.zip

 deploy:
   provider: FTP

build-local.bat (new file, 19 lines)

@@ -0,0 +1,19 @@
@ECHO OFF
SET PLATFORM=x64
SET CONFIGURATION=Release
WHERE msbuild
IF %ERRORLEVEL% EQU 0 GOTO RUNBUILD
SET PATH=C:\mb\windows-builds-64\tmp-bin\cmake-3.1.0-win32-x86\bin;%PATH%
SET PATH=C:\Program Files\7-Zip;%PATH%
ECHO activating VS command prompt ...
SET PATH=C:\Program Files (x86)\MSBuild\14.0\Bin;%PATH%
CALL "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd64
:RUNBUILD
powershell Set-ExecutionPolicy -Scope CurrentUser -ExecutionPolicy Unrestricted -Force
CALL appveyor-build.bat
EXIT /b %ERRORLEVEL%


@@ -171,7 +171,8 @@ class Contractor
         {
             SimpleLogger().Write(logWARNING)
                 << "Edge weight large -> "
-                << static_cast<unsigned int>(std::max(diter->weight, 1));
+                << static_cast<unsigned int>(std::max(diter->weight, 1)) << " : "
+                << static_cast<unsigned int>(diter->source) << " -> " << static_cast<unsigned int>(diter->target);
         }
 #endif
         edges.emplace_back(diter->source, diter->target,

@@ -186,6 +187,7 @@ class Contractor
         }
         // clear input vector
         input_edge_list.clear();
+        // FIXME not sure if we need this
         edges.shrink_to_fit();
         tbb::parallel_sort(edges.begin(), edges.end());

@@ -953,7 +955,6 @@ class Contractor
     }
     std::shared_ptr<ContractorGraph> contractor_graph;
-    std::vector<ContractorGraph::InputEdge> contracted_edge_list;
     stxxl::vector<QueryEdge> external_edge_list;
     std::vector<NodeID> orig_node_id_to_new_id_map;
     XORFastHash fast_hash;


@@ -0,0 +1,135 @@
/*
Copyright (c) 2015, Project OSRM contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "contractor_options.hpp"
#include "../util/git_sha.hpp"
#include "../util/simple_logger.hpp"
#include <boost/filesystem.hpp>
#include <boost/program_options.hpp>
#include <tbb/task_scheduler_init.h>
return_code
ContractorOptions::ParseArguments(int argc, char *argv[], ContractorConfig &contractor_config)
{
// declare a group of options that will be allowed only on command line
boost::program_options::options_description generic_options("Options");
generic_options.add_options()("version,v", "Show version")("help,h", "Show this help message")(
"config,c", boost::program_options::value<boost::filesystem::path>(&contractor_config.config_file_path)
->default_value("contractor.ini"),
"Path to a configuration file.");
// declare a group of options that will be allowed both on command line and in config file
boost::program_options::options_description config_options("Configuration");
config_options.add_options()(
"restrictions,r",
boost::program_options::value<boost::filesystem::path>(&contractor_config.restrictions_path),
"Restrictions file in .osrm.restrictions format")(
"profile,p", boost::program_options::value<boost::filesystem::path>(&contractor_config.profile_path)
->default_value("profile.lua"),
"Path to LUA routing profile")(
"threads,t", boost::program_options::value<unsigned int>(&contractor_config.requested_num_threads)
->default_value(tbb::task_scheduler_init::default_num_threads()),
"Number of threads to use");
// hidden options, will be allowed both on command line and in config file, but will not be
// shown to the user
boost::program_options::options_description hidden_options("Hidden options");
hidden_options.add_options()(
"input,i", boost::program_options::value<boost::filesystem::path>(&contractor_config.osrm_input_path),
"Input file in .osm, .osm.bz2 or .osm.pbf format");
// positional option
boost::program_options::positional_options_description positional_options;
positional_options.add("input", 1);
// combine above options for parsing
boost::program_options::options_description cmdline_options;
cmdline_options.add(generic_options).add(config_options).add(hidden_options);
boost::program_options::options_description config_file_options;
config_file_options.add(config_options).add(hidden_options);
boost::program_options::options_description visible_options(
"Usage: " + boost::filesystem::basename(argv[0]) + " <input.osrm> [options]");
visible_options.add(generic_options).add(config_options);
// parse command line options
boost::program_options::variables_map option_variables;
boost::program_options::store(boost::program_options::command_line_parser(argc, argv)
.options(cmdline_options)
.positional(positional_options)
.run(),
option_variables);
const auto &temp_config_path = option_variables["config"].as<boost::filesystem::path>();
if (boost::filesystem::is_regular_file(temp_config_path))
{
boost::program_options::store(boost::program_options::parse_config_file<char>(
temp_config_path.string().c_str(), cmdline_options, true),
option_variables);
}
if (option_variables.count("version"))
{
SimpleLogger().Write() << g_GIT_DESCRIPTION;
return return_code::exit;
}
if (option_variables.count("help"))
{
SimpleLogger().Write() << "\n" << visible_options;
return return_code::exit;
}
boost::program_options::notify(option_variables);
if (!option_variables.count("restrictions"))
{
contractor_config.restrictions_path = contractor_config.osrm_input_path.string() + ".restrictions";
}
if (!option_variables.count("input"))
{
SimpleLogger().Write() << "\n" << visible_options;
return return_code::fail;
}
return return_code::ok;
}
void ContractorOptions::GenerateOutputFilesNames(ContractorConfig &contractor_config)
{
contractor_config.node_output_path = contractor_config.osrm_input_path.string() + ".nodes";
contractor_config.edge_output_path = contractor_config.osrm_input_path.string() + ".edges";
contractor_config.geometry_output_path = contractor_config.osrm_input_path.string() + ".geometry";
contractor_config.graph_output_path = contractor_config.osrm_input_path.string() + ".hsgr";
contractor_config.rtree_nodes_output_path = contractor_config.osrm_input_path.string() + ".ramIndex";
contractor_config.rtree_leafs_output_path = contractor_config.osrm_input_path.string() + ".fileIndex";
}


@@ -0,0 +1,68 @@
/*
Copyright (c) 2015, Project OSRM contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CONTRACTOR_OPTIONS_HPP
#define CONTRACTOR_OPTIONS_HPP
#include <boost/filesystem/path.hpp>
#include <string>
enum class return_code : unsigned
{
ok,
fail,
exit
};
struct ContractorConfig
{
ContractorConfig() noexcept : requested_num_threads(0) {}
boost::filesystem::path config_file_path;
boost::filesystem::path osrm_input_path;
boost::filesystem::path restrictions_path;
boost::filesystem::path profile_path;
std::string node_output_path;
std::string edge_output_path;
std::string geometry_output_path;
std::string graph_output_path;
std::string rtree_nodes_output_path;
std::string rtree_leafs_output_path;
unsigned requested_num_threads;
};
struct ContractorOptions
{
static return_code ParseArguments(int argc, char *argv[], ContractorConfig &extractor_config);
static void GenerateOutputFilesNames(ContractorConfig &extractor_config);
};
#endif // EXTRACTOR_OPTIONS_HPP
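
For orientation only (not part of the commit), a minimal sketch of how the ContractorConfig/ContractorOptions pair introduced above might be driven from a tool's entry point. The include path is an assumption, and the hand-off to Prepare is only indicated in a comment because Prepare's construction from the config is not shown in this diff.

```cpp
// Hypothetical driver sketch, not part of the commit; include path is assumed.
#include "contractor/contractor_options.hpp"

int main(int argc, char *argv[])
{
    ContractorConfig contractor_config;

    // Fill the config struct from the command line and/or contractor.ini.
    const return_code result =
        ContractorOptions::ParseArguments(argc, argv, contractor_config);

    if (result == return_code::fail)
    {
        return 1; // missing <input.osrm> or invalid options
    }
    if (result == return_code::exit)
    {
        return 0; // --help or --version was already handled
    }

    // Derive the .nodes/.edges/.geometry/.hsgr/.ramIndex/.fileIndex output
    // paths from the input .osrm path.
    ContractorOptions::GenerateOutputFilesNames(contractor_config);

    // A Prepare instance would now consume contractor_config and call Run()
    // (see prepare.cpp further down); omitted here since its constructor is
    // not part of this diff.
    return 0;
}
```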


@@ -40,21 +40,21 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <iomanip>
 #include <limits>

-EdgeBasedGraphFactory::EdgeBasedGraphFactory(
-    const std::shared_ptr<NodeBasedDynamicGraph> &node_based_graph,
-    std::unique_ptr<RestrictionMap> restriction_map,
-    std::vector<NodeID> &barrier_node_list,
-    std::vector<NodeID> &traffic_light_node_list,
-    std::vector<QueryNode> &node_info_list,
-    SpeedProfileProperties &speed_profile)
+EdgeBasedGraphFactory::EdgeBasedGraphFactory(std::shared_ptr<NodeBasedDynamicGraph> node_based_graph,
+                                             std::shared_ptr<RestrictionMap> restriction_map,
+                                             std::unique_ptr<std::vector<NodeID>> barrier_node_list,
+                                             std::unique_ptr<std::vector<NodeID>> traffic_light_node_list,
+                                             const std::vector<QueryNode> &node_info_list,
+                                             const SpeedProfileProperties &speed_profile)
     : speed_profile(speed_profile),
       m_number_of_edge_based_nodes(std::numeric_limits<unsigned>::max()),
-      m_node_info_list(node_info_list), m_node_based_graph(node_based_graph),
+      m_node_info_list(node_info_list),
+      m_node_based_graph(std::move(node_based_graph)),
       m_restriction_map(std::move(restriction_map)), max_id(0), removed_node_count(0)
 {
     // insert into unordered sets for fast lookup
-    m_barrier_nodes.insert(barrier_node_list.begin(), barrier_node_list.end());
-    m_traffic_lights.insert(traffic_light_node_list.begin(), traffic_light_node_list.end());
+    m_barrier_nodes.insert(barrier_node_list->begin(), barrier_node_list->end());
+    m_traffic_lights.insert(traffic_light_node_list->begin(), traffic_light_node_list->end());
 }

 void EdgeBasedGraphFactory::GetEdgeBasedEdges(DeallocatingVector<EdgeBasedEdge> &output_edge_list)

@@ -193,15 +193,16 @@ void EdgeBasedGraphFactory::InsertEdgeBasedNode(const NodeID node_u,
     {
         BOOST_ASSERT(forward_data.forward);
     }
+    else
+    {
+        BOOST_ASSERT(!forward_data.forward);
+    }
     if (reverse_data.edgeBasedNodeID != SPECIAL_NODEID)
     {
         BOOST_ASSERT(reverse_data.forward);
     }
-    if (forward_data.edgeBasedNodeID == SPECIAL_NODEID)
-    {
-        BOOST_ASSERT(!forward_data.forward);
-    }
-    if (reverse_data.edgeBasedNodeID == SPECIAL_NODEID)
+    else
     {
         BOOST_ASSERT(!reverse_data.forward);
     }

@@ -233,7 +234,6 @@ void EdgeBasedGraphFactory::Run(const std::string &original_edge_data_filename,
                                 const std::string &geometry_filename,
                                 lua_State *lua_state)
 {
     TIMER_START(geometry);
     CompressGeometry();
     TIMER_STOP(geometry);

@@ -290,12 +290,33 @@ void EdgeBasedGraphFactory::CompressGeometry()
             continue;
         }

+        /*
+         *    reverse_e2   forward_e2
+         * u <---------- v -----------> w
+         *    ---------->   <-----------
+         *    forward_e1   reverse_e1
+         *
+         * Will be compressed to:
+         *
+         *    reverse_e1
+         * u <---------- w
+         *    ---------->
+         *    forward_e1
+         *
+         * If the edges are compatible.
+         *
+         */
         const bool reverse_edge_order =
             !(m_node_based_graph->GetEdgeData(m_node_based_graph->BeginEdges(node_v)).forward);
         const EdgeID forward_e2 = m_node_based_graph->BeginEdges(node_v) + reverse_edge_order;
         BOOST_ASSERT(SPECIAL_EDGEID != forward_e2);
+        BOOST_ASSERT(forward_e2 >= m_node_based_graph->BeginEdges(node_v) &&
+                     forward_e2 < m_node_based_graph->EndEdges(node_v));
         const EdgeID reverse_e2 = m_node_based_graph->BeginEdges(node_v) + 1 - reverse_edge_order;
         BOOST_ASSERT(SPECIAL_EDGEID != reverse_e2);
+        BOOST_ASSERT(reverse_e2 >= m_node_based_graph->BeginEdges(node_v) &&
+                     reverse_e2 < m_node_based_graph->EndEdges(node_v));
         const EdgeData &fwd_edge_data2 = m_node_based_graph->GetEdgeData(forward_e2);
         const EdgeData &rev_edge_data2 = m_node_based_graph->GetEdgeData(reverse_e2);

@@ -322,9 +343,20 @@ void EdgeBasedGraphFactory::CompressGeometry()
             continue;
         }
-        if ( // TODO: rename to IsCompatibleTo
-            fwd_edge_data1.IsEqualTo(fwd_edge_data2) && rev_edge_data1.IsEqualTo(rev_edge_data2))
+        // this case can happen if two ways with different names overlap
+        if (fwd_edge_data1.nameID != rev_edge_data1.nameID ||
+            fwd_edge_data2.nameID != rev_edge_data2.nameID)
         {
+            continue;
+        }
+
+        if (fwd_edge_data1.IsCompatibleTo(fwd_edge_data2) && rev_edge_data1.IsCompatibleTo(rev_edge_data2))
+        {
+            BOOST_ASSERT(m_node_based_graph->GetEdgeData(forward_e1).nameID ==
+                         m_node_based_graph->GetEdgeData(reverse_e1).nameID);
+            BOOST_ASSERT(m_node_based_graph->GetEdgeData(forward_e2).nameID ==
+                         m_node_based_graph->GetEdgeData(reverse_e2).nameID);
             // Get distances before graph is modified
             const int forward_weight1 = m_node_based_graph->GetEdgeData(forward_e1).distance;
             const int forward_weight2 = m_node_based_graph->GetEdgeData(forward_e2).distance;

@@ -336,7 +368,7 @@ void EdgeBasedGraphFactory::CompressGeometry()
             const int reverse_weight2 = m_node_based_graph->GetEdgeData(reverse_e2).distance;
             BOOST_ASSERT(0 != reverse_weight1);
-            BOOST_ASSERT(0 != forward_weight2);
+            BOOST_ASSERT(0 != reverse_weight2);
             const bool has_node_penalty = m_traffic_lights.find(node_v) != m_traffic_lights.end();

@@ -362,11 +394,11 @@ void EdgeBasedGraphFactory::CompressGeometry()
             // update any involved turn restrictions
             m_restriction_map->FixupStartingTurnRestriction(node_u, node_v, node_w);
             m_restriction_map->FixupArrivingTurnRestriction(node_u, node_v, node_w,
-                                                            m_node_based_graph);
+                                                            *m_node_based_graph);
             m_restriction_map->FixupStartingTurnRestriction(node_w, node_v, node_u);
             m_restriction_map->FixupArrivingTurnRestriction(node_w, node_v, node_u,
-                                                            m_node_based_graph);
+                                                            *m_node_based_graph);
             // store compressed geometry in container
             m_geometry_compressor.CompressEdge(

@@ -378,8 +410,7 @@ void EdgeBasedGraphFactory::CompressGeometry()
                 reverse_weight2 + (has_node_penalty ? speed_profile.traffic_signal_penalty : 0));
             ++removed_node_count;
-            BOOST_ASSERT(m_node_based_graph->GetEdgeData(forward_e1).nameID ==
-                         m_node_based_graph->GetEdgeData(reverse_e1).nameID);
         }
     }
     SimpleLogger().Write() << "removed " << removed_node_count << " nodes";

@@ -403,18 +434,19 @@ void EdgeBasedGraphFactory::CompressGeometry()
                            << new_edge_count / (double)original_number_of_edges;
 }

-/**
- * Writes the id of the edge in the edge expanded graph (into the edge in the node based graph)
- */
+/// Renumbers all _forward_ edges and sets the edgeBasedNodeID.
+/// A specific numbering is not important. Any unique ID will do.
 void EdgeBasedGraphFactory::RenumberEdges()
 {
-    // renumber edge based node IDs
+    // renumber edge based node of outgoing edges
     unsigned numbered_edges_count = 0;
     for (const auto current_node : osrm::irange(0u, m_node_based_graph->GetNumberOfNodes()))
     {
         for (const auto current_edge : m_node_based_graph->GetAdjacentEdgeRange(current_node))
         {
             EdgeData &edge_data = m_node_based_graph->GetEdgeData(current_edge);
+            // this edge is an incoming edge
             if (!edge_data.forward)
             {
                 continue;

@@ -466,7 +498,8 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedNodes()
         const NodeID node_v = m_node_based_graph->GetTarget(e1);
         BOOST_ASSERT(SPECIAL_NODEID != node_v);
-        // pick only every other edge
+        // pick only every other edge, since we have every edge as an outgoing
+        // and incoming egde
         if (node_u > node_v)
         {
             continue;

@@ -492,6 +525,7 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedNodes()
         const bool component_is_tiny = size_of_component < 1000;
+        // we only set edgeBasedNodeID for forward edges
         if (edge_data.edgeBasedNodeID == SPECIAL_NODEID)
         {
             InsertEdgeBasedNode(node_v, node_u,


@@ -59,12 +59,12 @@ class EdgeBasedGraphFactory
     struct SpeedProfileProperties;

-    explicit EdgeBasedGraphFactory(const std::shared_ptr<NodeBasedDynamicGraph> &node_based_graph,
-                                   std::unique_ptr<RestrictionMap> restricion_map,
-                                   std::vector<NodeID> &barrier_node_list,
-                                   std::vector<NodeID> &traffic_light_node_list,
-                                   std::vector<QueryNode> &node_info_list,
-                                   SpeedProfileProperties &speed_profile);
+    explicit EdgeBasedGraphFactory(std::shared_ptr<NodeBasedDynamicGraph> node_based_graph,
+                                   std::shared_ptr<RestrictionMap> restricion_map,
+                                   std::unique_ptr<std::vector<NodeID>> barrier_node_list,
+                                   std::unique_ptr<std::vector<NodeID>> traffic_light_node_list,
+                                   const std::vector<QueryNode> &node_info_list,
+                                   const SpeedProfileProperties &speed_profile);

     void Run(const std::string &original_edge_data_filename,
              const std::string &geometry_filename,

@@ -97,15 +97,16 @@ class EdgeBasedGraphFactory
     unsigned m_number_of_edge_based_nodes;

-    std::vector<QueryNode> m_node_info_list;
     std::vector<EdgeBasedNode> m_edge_based_node_list;
     DeallocatingVector<EdgeBasedEdge> m_edge_based_edge_list;
+    const std::vector<QueryNode>& m_node_info_list;
     std::shared_ptr<NodeBasedDynamicGraph> m_node_based_graph;
+    std::shared_ptr<RestrictionMap> m_restriction_map;

     std::unordered_set<NodeID> m_barrier_nodes;
     std::unordered_set<NodeID> m_traffic_lights;
-    std::unique_ptr<RestrictionMap> m_restriction_map;

     GeometryCompressor m_geometry_compressor;


@@ -48,7 +48,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <boost/filesystem/fstream.hpp>
 #include <boost/program_options.hpp>

-#include <tbb/task_scheduler_init.h>
 #include <tbb/parallel_sort.h>

 #include <chrono>

@@ -57,166 +56,98 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <thread> #include <thread>
#include <vector> #include <vector>
Prepare::Prepare() : requested_num_threads(1) {}
Prepare::~Prepare() {} Prepare::~Prepare() {}
int Prepare::Process(int argc, char *argv[]) int Prepare::Run()
{ {
LogPolicy::GetInstance().Unmute();
TIMER_START(preparing);
TIMER_START(expansion);
if (!ParseArguments(argc, argv))
{
return 0;
}
if (!boost::filesystem::is_regular_file(input_path))
{
SimpleLogger().Write(logWARNING) << "Input file " << input_path.string() << " not found!";
return 1;
}
if (!boost::filesystem::is_regular_file(profile_path))
{
SimpleLogger().Write(logWARNING) << "Profile " << profile_path.string() << " not found!";
return 1;
}
if (1 > requested_num_threads)
{
SimpleLogger().Write(logWARNING) << "Number of threads must be 1 or larger";
return 1;
}
const unsigned recommended_num_threads = tbb::task_scheduler_init::default_num_threads();
SimpleLogger().Write() << "Input file: " << input_path.filename().string();
SimpleLogger().Write() << "Restrictions file: " << restrictions_path.filename().string();
SimpleLogger().Write() << "Profile: " << profile_path.filename().string();
SimpleLogger().Write() << "Threads: " << requested_num_threads;
if (recommended_num_threads != requested_num_threads)
{
SimpleLogger().Write(logWARNING) << "The recommended number of threads is "
<< recommended_num_threads
<< "! This setting may have performance side-effects.";
}
tbb::task_scheduler_init init(requested_num_threads);
LogPolicy::GetInstance().Unmute();
FingerPrint fingerprint_orig;
CheckRestrictionsFile(fingerprint_orig);
boost::filesystem::ifstream input_stream(input_path, std::ios::in | std::ios::binary);
node_filename = input_path.string() + ".nodes";
edge_out = input_path.string() + ".edges";
geometry_filename = input_path.string() + ".geometry";
graph_out = input_path.string() + ".hsgr";
rtree_nodes_path = input_path.string() + ".ramIndex";
rtree_leafs_path = input_path.string() + ".fileIndex";
/*** Setup Scripting Environment ***/
// Create a new lua state
lua_State *lua_state = luaL_newstate();
// Connect LuaBind to this lua state
luabind::open(lua_state);
EdgeBasedGraphFactory::SpeedProfileProperties speed_profile;
if (!SetupScriptingEnvironment(lua_state, speed_profile))
{
return 1;
}
#ifdef WIN32 #ifdef WIN32
#pragma message("Memory consumption on Windows can be higher due to different bit packing") #pragma message("Memory consumption on Windows can be higher due to different bit packing")
#else #else
static_assert(sizeof(ImportEdge) == 20, static_assert(sizeof(NodeBasedEdge) == 20,
"changing ImportEdge type has influence on memory consumption!"); "changing NodeBasedEdge type has influence on memory consumption!");
static_assert(sizeof(EdgeBasedEdge) == 16,
"changing EdgeBasedEdge type has influence on memory consumption!");
#endif #endif
NodeID number_of_node_based_nodes = readBinaryOSRMGraphFromStream(
input_stream, edge_list, barrier_node_list, traffic_light_list,
&internal_to_external_node_map, restriction_list);
input_stream.close();
if (edge_list.empty()) TIMER_START(preparing);
{
SimpleLogger().Write(logWARNING) << "The input data is empty, exiting.";
return 1;
}
SimpleLogger().Write() << restriction_list.size() << " restrictions, " // Create a new lua state
<< barrier_node_list.size() << " bollard nodes, "
<< traffic_light_list.size() << " traffic lights";
std::vector<EdgeBasedNode> node_based_edge_list; SimpleLogger().Write() << "Generating edge-expanded graph representation";
unsigned number_of_edge_based_nodes = 0;
TIMER_START(expansion);
auto node_based_edge_list = osrm::make_unique<std::vector<EdgeBasedNode>>();;
DeallocatingVector<EdgeBasedEdge> edge_based_edge_list; DeallocatingVector<EdgeBasedEdge> edge_based_edge_list;
auto internal_to_external_node_map = osrm::make_unique<std::vector<QueryNode>>();
auto graph_size =
BuildEdgeExpandedGraph(*internal_to_external_node_map,
*node_based_edge_list, edge_based_edge_list);
// init node_based_edge_list, edge_based_edge_list by edgeList auto number_of_node_based_nodes = graph_size.first;
number_of_edge_based_nodes = auto number_of_edge_based_nodes = graph_size.second;
BuildEdgeExpandedGraph(lua_state, number_of_node_based_nodes, node_based_edge_list,
edge_based_edge_list, speed_profile);
lua_close(lua_state);
TIMER_STOP(expansion); TIMER_STOP(expansion);
BuildRTree(node_based_edge_list); SimpleLogger().Write() << "building r-tree ...";
TIMER_START(rtree);
RangebasedCRC32 crc32; BuildRTree(*node_based_edge_list, *internal_to_external_node_map);
if (crc32.using_hardware())
{
SimpleLogger().Write() << "using hardware based CRC32 computation";
}
else
{
SimpleLogger().Write() << "using software based CRC32 computation";
}
const unsigned crc32_value = crc32(node_based_edge_list); TIMER_STOP(rtree);
node_based_edge_list.clear();
node_based_edge_list.shrink_to_fit();
SimpleLogger().Write() << "CRC32: " << crc32_value;
WriteNodeMapping(); SimpleLogger().Write() << "writing node map ...";
WriteNodeMapping(std::move(internal_to_external_node_map));
/*** // Contracting the edge-expanded graph
* Contracting the edge-expanded graph
*/
SimpleLogger().Write() << "initializing contractor";
auto contractor =
osrm::make_unique<Contractor>(number_of_edge_based_nodes, edge_based_edge_list);
TIMER_START(contraction); TIMER_START(contraction);
contractor->Run(); auto contracted_edge_list = osrm::make_unique<DeallocatingVector<QueryEdge>>();
ContractGraph(number_of_edge_based_nodes, edge_based_edge_list, *contracted_edge_list);
TIMER_STOP(contraction); TIMER_STOP(contraction);
SimpleLogger().Write() << "Contraction took " << TIMER_SEC(contraction) << " sec"; SimpleLogger().Write() << "Contraction took " << TIMER_SEC(contraction) << " sec";
DeallocatingVector<QueryEdge> contracted_edge_list; std::size_t number_of_used_edges = WriteContractedGraph(number_of_edge_based_nodes,
contractor->GetEdges(contracted_edge_list); std::move(node_based_edge_list),
contractor.reset(); std::move(contracted_edge_list));
/*** TIMER_STOP(preparing);
* Sorting contracted edges in a way that the static query graph can read some in in-place.
*/
tbb::parallel_sort(contracted_edge_list.begin(), contracted_edge_list.end()); SimpleLogger().Write() << "Preprocessing : " << TIMER_SEC(preparing) << " seconds";
const unsigned contracted_edge_count = contracted_edge_list.size(); SimpleLogger().Write() << "Expansion : " << (number_of_node_based_nodes / TIMER_SEC(expansion))
<< " nodes/sec and "
<< (number_of_edge_based_nodes / TIMER_SEC(expansion)) << " edges/sec";
SimpleLogger().Write() << "Contraction: "
<< (number_of_edge_based_nodes / TIMER_SEC(contraction))
<< " nodes/sec and " << number_of_used_edges / TIMER_SEC(contraction)
<< " edges/sec";
SimpleLogger().Write() << "finished preprocessing";
return 0;
}
std::size_t Prepare::WriteContractedGraph(unsigned number_of_edge_based_nodes,
std::unique_ptr<std::vector<EdgeBasedNode>> node_based_edge_list,
std::unique_ptr<DeallocatingVector<QueryEdge>> contracted_edge_list)
{
const unsigned crc32_value = CalculateEdgeChecksum(std::move(node_based_edge_list));
// Sorting contracted edges in a way that the static query graph can read some in in-place.
tbb::parallel_sort(contracted_edge_list->begin(), contracted_edge_list->end());
const unsigned contracted_edge_count = contracted_edge_list->size();
SimpleLogger().Write() << "Serializing compacted graph of " << contracted_edge_count SimpleLogger().Write() << "Serializing compacted graph of " << contracted_edge_count
<< " edges"; << " edges";
boost::filesystem::ofstream hsgr_output_stream(graph_out, std::ios::binary); const FingerPrint fingerprint = FingerPrint::GetValid();
hsgr_output_stream.write((char *)&fingerprint_orig, sizeof(FingerPrint)); boost::filesystem::ofstream hsgr_output_stream(config.graph_output_path, std::ios::binary);
hsgr_output_stream.write((char *)&fingerprint, sizeof(FingerPrint));
const unsigned max_used_node_id = 1 + [&contracted_edge_list] const unsigned max_used_node_id = 1 + [&contracted_edge_list]
{ {
unsigned tmp_max = 0; unsigned tmp_max = 0;
for (const QueryEdge &edge : contracted_edge_list) for (const QueryEdge &edge : *contracted_edge_list)
{ {
BOOST_ASSERT(SPECIAL_NODEID != edge.source); BOOST_ASSERT(SPECIAL_NODEID != edge.source);
BOOST_ASSERT(SPECIAL_NODEID != edge.target); BOOST_ASSERT(SPECIAL_NODEID != edge.target);
@@ -241,7 +172,7 @@ int Prepare::Process(int argc, char *argv[])
     for (const auto node : osrm::irange(0u, max_used_node_id))
     {
         last_edge = edge;
-        while ((edge < contracted_edge_count) && (contracted_edge_list[edge].source == node))
+        while ((edge < contracted_edge_count) && ((*contracted_edge_list)[edge].source == node))
         {
             ++edge;
         }
@@ -270,19 +201,19 @@ int Prepare::Process(int argc, char *argv[])
         hsgr_output_stream.write((char *)&node_array[0],
                                  sizeof(StaticGraph<EdgeData>::NodeArrayEntry) * node_array_size);
     }

     // serialize all edges
     SimpleLogger().Write() << "Building edge array";
     edge = 0;
     int number_of_used_edges = 0;

     StaticGraph<EdgeData>::EdgeArrayEntry current_edge;
-    for (const auto edge : osrm::irange<std::size_t>(0, contracted_edge_list.size()))
+    for (const auto edge : osrm::irange<std::size_t>(0, contracted_edge_list->size()))
     {
         // no eigen loops
-        BOOST_ASSERT(contracted_edge_list[edge].source != contracted_edge_list[edge].target);
-        current_edge.target = contracted_edge_list[edge].target;
-        current_edge.data = contracted_edge_list[edge].data;
+        BOOST_ASSERT((*contracted_edge_list)[edge].source != (*contracted_edge_list)[edge].target);
+        current_edge.target = (*contracted_edge_list)[edge].target;
+        current_edge.data = (*contracted_edge_list)[edge].data;

         // every target needs to be valid
         BOOST_ASSERT(current_edge.target < max_used_node_id);
@@ -290,12 +221,12 @@ int Prepare::Process(int argc, char *argv[])
         if (current_edge.data.distance <= 0)
         {
             SimpleLogger().Write(logWARNING) << "Edge: " << edge
-                                             << ",source: " << contracted_edge_list[edge].source
-                                             << ", target: " << contracted_edge_list[edge].target
+                                             << ",source: " << (*contracted_edge_list)[edge].source
+                                             << ", target: " << (*contracted_edge_list)[edge].target
                                              << ", dist: " << current_edge.data.distance;
             SimpleLogger().Write(logWARNING) << "Failed at adjacency list of node "
-                                             << contracted_edge_list[edge].source << "/"
+                                             << (*contracted_edge_list)[edge].source << "/"
                                              << node_array.size() - 1;
             return 1;
         }
@@ -305,170 +236,54 @@ int Prepare::Process(int argc, char *argv[])
++number_of_used_edges; ++number_of_used_edges;
} }
hsgr_output_stream.close();
TIMER_STOP(preparing); return number_of_used_edges;
SimpleLogger().Write() << "Preprocessing : " << TIMER_SEC(preparing) << " seconds";
SimpleLogger().Write() << "Expansion : " << (number_of_node_based_nodes / TIMER_SEC(expansion))
<< " nodes/sec and "
<< (number_of_edge_based_nodes / TIMER_SEC(expansion)) << " edges/sec";
SimpleLogger().Write() << "Contraction: "
<< (number_of_edge_based_nodes / TIMER_SEC(contraction))
<< " nodes/sec and " << number_of_used_edges / TIMER_SEC(contraction)
<< " edges/sec";
node_array.clear();
SimpleLogger().Write() << "finished preprocessing";
return 0;
} }
/** unsigned Prepare::CalculateEdgeChecksum(std::unique_ptr<std::vector<EdgeBasedNode>> node_based_edge_list)
\brief Parses command line arguments
\param argc count of arguments
\param argv array of arguments
\param result [out] value for exit return value
\return true if everything is ok, false if need to terminate execution
*/
bool Prepare::ParseArguments(int argc, char *argv[])
{ {
// declare a group of options that will be allowed only on command line RangebasedCRC32 crc32;
boost::program_options::options_description generic_options("Options"); if (crc32.using_hardware())
generic_options.add_options()("version,v", "Show version")("help,h", "Show this help message")(
"config,c", boost::program_options::value<boost::filesystem::path>(&config_file_path)
->default_value("contractor.ini"),
"Path to a configuration file.");
// declare a group of options that will be allowed both on command line and in config file
boost::program_options::options_description config_options("Configuration");
config_options.add_options()(
"restrictions,r",
boost::program_options::value<boost::filesystem::path>(&restrictions_path),
"Restrictions file in .osrm.restrictions format")(
"profile,p", boost::program_options::value<boost::filesystem::path>(&profile_path)
->default_value("profile.lua"),
"Path to LUA routing profile")(
"threads,t", boost::program_options::value<unsigned int>(&requested_num_threads)
->default_value(tbb::task_scheduler_init::default_num_threads()),
"Number of threads to use");
// hidden options, will be allowed both on command line and in config file, but will not be
// shown to the user
boost::program_options::options_description hidden_options("Hidden options");
hidden_options.add_options()(
"input,i", boost::program_options::value<boost::filesystem::path>(&input_path),
"Input file in .osm, .osm.bz2 or .osm.pbf format");
// positional option
boost::program_options::positional_options_description positional_options;
positional_options.add("input", 1);
// combine above options for parsing
boost::program_options::options_description cmdline_options;
cmdline_options.add(generic_options).add(config_options).add(hidden_options);
boost::program_options::options_description config_file_options;
config_file_options.add(config_options).add(hidden_options);
boost::program_options::options_description visible_options(
"Usage: " + boost::filesystem::basename(argv[0]) + " <input.osrm> [options]");
visible_options.add(generic_options).add(config_options);
// parse command line options
boost::program_options::variables_map option_variables;
boost::program_options::store(boost::program_options::command_line_parser(argc, argv)
.options(cmdline_options)
.positional(positional_options)
.run(),
option_variables);
const auto &temp_config_path = option_variables["config"].as<boost::filesystem::path>();
if (boost::filesystem::is_regular_file(temp_config_path))
{ {
boost::program_options::store(boost::program_options::parse_config_file<char>( SimpleLogger().Write() << "using hardware based CRC32 computation";
temp_config_path.string().c_str(), cmdline_options, true), }
option_variables); else
{
SimpleLogger().Write() << "using software based CRC32 computation";
} }
if (option_variables.count("version")) const unsigned crc32_value = crc32(*node_based_edge_list);
{ SimpleLogger().Write() << "CRC32: " << crc32_value;
SimpleLogger().Write() << g_GIT_DESCRIPTION;
return false;
}
if (option_variables.count("help")) return crc32_value;
{
SimpleLogger().Write() << "\n" << visible_options;
return false;
}
boost::program_options::notify(option_variables);
if (!option_variables.count("restrictions"))
{
restrictions_path = std::string(input_path.string() + ".restrictions");
}
if (!option_variables.count("input"))
{
SimpleLogger().Write() << "\n" << visible_options;
return false;
}
return true;
}
/**
\brief Loads and checks file UUIDs
*/
void Prepare::CheckRestrictionsFile(FingerPrint &fingerprint_orig)
{
boost::filesystem::ifstream restriction_stream(restrictions_path, std::ios::binary);
FingerPrint fingerprint_loaded;
unsigned number_of_usable_restrictions = 0;
restriction_stream.read((char *)&fingerprint_loaded, sizeof(FingerPrint));
if (!fingerprint_loaded.TestPrepare(fingerprint_orig))
{
SimpleLogger().Write(logWARNING) << ".restrictions was prepared with different build.\n"
"Reprocess to get rid of this warning.";
}
restriction_stream.read((char *)&number_of_usable_restrictions, sizeof(unsigned));
restriction_list.resize(number_of_usable_restrictions);
if (number_of_usable_restrictions > 0)
{
restriction_stream.read((char *)&(restriction_list[0]),
number_of_usable_restrictions * sizeof(TurnRestriction));
}
restriction_stream.close();
} }
/** /**
\brief Setups scripting environment (lua-scripting) \brief Setups scripting environment (lua-scripting)
Also initializes speed profile. Also initializes speed profile.
*/ */
bool Prepare::SetupScriptingEnvironment( void Prepare::SetupScriptingEnvironment(
lua_State *lua_state, EdgeBasedGraphFactory::SpeedProfileProperties &speed_profile) lua_State *lua_state, EdgeBasedGraphFactory::SpeedProfileProperties &speed_profile)
{ {
// open utility libraries string library; // open utility libraries string library;
luaL_openlibs(lua_state); luaL_openlibs(lua_state);
// adjust lua load path // adjust lua load path
luaAddScriptFolderToLoadPath(lua_state, profile_path.string().c_str()); luaAddScriptFolderToLoadPath(lua_state, config.profile_path.string().c_str());
// Now call our function in a lua script // Now call our function in a lua script
if (0 != luaL_dofile(lua_state, profile_path.string().c_str())) if (0 != luaL_dofile(lua_state, config.profile_path.string().c_str()))
{ {
std::cerr << lua_tostring(lua_state, -1) << " occured in scripting block" << std::endl; std::stringstream msg;
return false; msg << lua_tostring(lua_state, -1) << " occured in scripting block";
throw osrm::exception(msg.str());
} }
if (0 != luaL_dostring(lua_state, "return traffic_signal_penalty\n")) if (0 != luaL_dostring(lua_state, "return traffic_signal_penalty\n"))
{ {
std::cerr << lua_tostring(lua_state, -1) << " occured in scripting block" << std::endl; std::stringstream msg;
return false; msg << lua_tostring(lua_state, -1) << " occured in scripting block";
throw osrm::exception(msg.str());
} }
speed_profile.traffic_signal_penalty = 10 * lua_tointeger(lua_state, -1); speed_profile.traffic_signal_penalty = 10 * lua_tointeger(lua_state, -1);
SimpleLogger().Write(logDEBUG) SimpleLogger().Write(logDEBUG)
@@ -476,92 +291,140 @@ bool Prepare::SetupScriptingEnvironment(
if (0 != luaL_dostring(lua_state, "return u_turn_penalty\n")) if (0 != luaL_dostring(lua_state, "return u_turn_penalty\n"))
{ {
std::cerr << lua_tostring(lua_state, -1) << " occured in scripting block" << std::endl; std::stringstream msg;
return false; msg << lua_tostring(lua_state, -1) << " occured in scripting block";
throw osrm::exception(msg.str());
} }
speed_profile.u_turn_penalty = 10 * lua_tointeger(lua_state, -1); speed_profile.u_turn_penalty = 10 * lua_tointeger(lua_state, -1);
speed_profile.has_turn_penalty_function = lua_function_exists(lua_state, "turn_function"); speed_profile.has_turn_penalty_function = lua_function_exists(lua_state, "turn_function");
return true;
} }
/**
\brief Build load restrictions from .restriction file
*/
std::shared_ptr<RestrictionMap> Prepare::LoadRestrictionMap()
{
boost::filesystem::ifstream input_stream(config.restrictions_path, std::ios::in | std::ios::binary);
std::vector<TurnRestriction> restriction_list;
loadRestrictionsFromFile(input_stream, restriction_list);
SimpleLogger().Write() << " - " << restriction_list.size() << " restrictions.";
return std::make_shared<RestrictionMap>(restriction_list);
}
/**
\brief Load node based graph from .osrm file
*/
std::shared_ptr<NodeBasedDynamicGraph>
Prepare::LoadNodeBasedGraph(std::vector<NodeID> &barrier_node_list,
std::vector<NodeID> &traffic_light_list,
std::vector<QueryNode>& internal_to_external_node_map)
{
std::vector<NodeBasedEdge> edge_list;
boost::filesystem::ifstream input_stream(config.osrm_input_path, std::ios::in | std::ios::binary);
NodeID number_of_node_based_nodes = loadNodesFromFile(input_stream,
barrier_node_list, traffic_light_list,
internal_to_external_node_map);
SimpleLogger().Write() << " - " << barrier_node_list.size() << " bollard nodes, "
<< traffic_light_list.size() << " traffic lights";
loadEdgesFromFile(input_stream, edge_list);
if (edge_list.empty())
{
SimpleLogger().Write(logWARNING) << "The input data is empty, exiting.";
return std::shared_ptr<NodeBasedDynamicGraph>();
}
return NodeBasedDynamicGraphFromImportEdges(number_of_node_based_nodes, edge_list);
}
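// Hedged note on the .osrm layout read here; it matches what
// ExtractionContainers::WriteNodes()/WriteEdges() further below emit:
// a FingerPrint, an unsigned node count plus raw ExternalMemoryNode records,
// then an unsigned edge count plus raw NodeBasedEdge records, e.g.
//
// unsigned node_count = 0;
// input_stream.read(reinterpret_cast<char *>(&node_count), sizeof(unsigned));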
/** /**
\brief Building an edge-expanded graph from node-based input and turn restrictions \brief Building an edge-expanded graph from node-based input and turn restrictions
*/ */
std::size_t std::pair<std::size_t, std::size_t>
Prepare::BuildEdgeExpandedGraph(lua_State *lua_state, Prepare::BuildEdgeExpandedGraph(std::vector<QueryNode> &internal_to_external_node_map,
NodeID number_of_node_based_nodes,
std::vector<EdgeBasedNode> &node_based_edge_list, std::vector<EdgeBasedNode> &node_based_edge_list,
DeallocatingVector<EdgeBasedEdge> &edge_based_edge_list, DeallocatingVector<EdgeBasedEdge> &edge_based_edge_list)
EdgeBasedGraphFactory::SpeedProfileProperties &speed_profile)
{ {
SimpleLogger().Write() << "Generating edge-expanded graph representation"; lua_State *lua_state = luaL_newstate();
std::shared_ptr<NodeBasedDynamicGraph> node_based_graph = luabind::open(lua_state);
NodeBasedDynamicGraphFromImportEdges(number_of_node_based_nodes, edge_list);
std::unique_ptr<RestrictionMap> restriction_map =
osrm::make_unique<RestrictionMap>(restriction_list);
std::shared_ptr<EdgeBasedGraphFactory> edge_based_graph_factory =
std::make_shared<EdgeBasedGraphFactory>(node_based_graph, std::move(restriction_map),
barrier_node_list, traffic_light_list,
internal_to_external_node_map, speed_profile);
edge_list.clear();
edge_list.shrink_to_fit();
edge_based_graph_factory->Run(edge_out, geometry_filename, lua_state); EdgeBasedGraphFactory::SpeedProfileProperties speed_profile;
restriction_list.clear(); SetupScriptingEnvironment(lua_state, speed_profile);
restriction_list.shrink_to_fit();
barrier_node_list.clear(); auto barrier_node_list = osrm::make_unique<std::vector<NodeID>>();
barrier_node_list.shrink_to_fit(); auto traffic_light_list = osrm::make_unique<std::vector<NodeID>>();
traffic_light_list.clear();
traffic_light_list.shrink_to_fit(); auto restriction_map = LoadRestrictionMap();
auto node_based_graph = LoadNodeBasedGraph(*barrier_node_list, *traffic_light_list, internal_to_external_node_map);
const std::size_t number_of_node_based_nodes = node_based_graph->GetNumberOfNodes();
EdgeBasedGraphFactory edge_based_graph_factory(node_based_graph,
restriction_map,
std::move(barrier_node_list),
std::move(traffic_light_list),
internal_to_external_node_map,
speed_profile);
edge_based_graph_factory.Run(config.edge_output_path, config.geometry_output_path, lua_state);
lua_close(lua_state);
const std::size_t number_of_edge_based_nodes = const std::size_t number_of_edge_based_nodes =
edge_based_graph_factory->GetNumberOfEdgeBasedNodes(); edge_based_graph_factory.GetNumberOfEdgeBasedNodes();
BOOST_ASSERT(number_of_edge_based_nodes != std::numeric_limits<unsigned>::max()); BOOST_ASSERT(number_of_edge_based_nodes != std::numeric_limits<unsigned>::max());
#ifndef WIN32
static_assert(sizeof(EdgeBasedEdge) == 16,
"changing ImportEdge type has influence on memory consumption!");
#endif
edge_based_graph_factory->GetEdgeBasedEdges(edge_based_edge_list); edge_based_graph_factory.GetEdgeBasedEdges(edge_based_edge_list);
edge_based_graph_factory->GetEdgeBasedNodes(node_based_edge_list); edge_based_graph_factory.GetEdgeBasedNodes(node_based_edge_list);
edge_based_graph_factory.reset(); return std::make_pair(number_of_node_based_nodes, number_of_edge_based_nodes);
node_based_graph.reset(); }
return number_of_edge_based_nodes; /**
\brief Build contracted graph.
*/
void Prepare::ContractGraph(const std::size_t number_of_edge_based_nodes,
DeallocatingVector<EdgeBasedEdge>& edge_based_edge_list,
DeallocatingVector<QueryEdge>& contracted_edge_list)
{
Contractor contractor(number_of_edge_based_nodes, edge_based_edge_list);
contractor.Run();
contractor.GetEdges(contracted_edge_list);
} }
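// Hedged sketch of how these stages are presumably chained inside Prepare::Run()
// (local variable names are illustrative only):
//
// std::vector<EdgeBasedNode> node_based_edge_list;
// DeallocatingVector<EdgeBasedEdge> edge_based_edge_list;
// std::vector<QueryNode> internal_to_external_node_map;
// const auto graph_sizes = BuildEdgeExpandedGraph(internal_to_external_node_map,
//                                                 node_based_edge_list, edge_based_edge_list);
// DeallocatingVector<QueryEdge> contracted_edge_list;
// ContractGraph(graph_sizes.second, edge_based_edge_list, contracted_edge_list);
// // graph_sizes.first/.second hold the node-based / edge-based node counts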
/** /**
\brief Writing info on original (node-based) nodes \brief Writing info on original (node-based) nodes
*/ */
void Prepare::WriteNodeMapping() void Prepare::WriteNodeMapping(std::unique_ptr<std::vector<QueryNode>> internal_to_external_node_map)
{ {
SimpleLogger().Write() << "writing node map ..."; boost::filesystem::ofstream node_stream(config.node_output_path, std::ios::binary);
boost::filesystem::ofstream node_stream(node_filename, std::ios::binary); const unsigned size_of_mapping = internal_to_external_node_map->size();
const unsigned size_of_mapping = internal_to_external_node_map.size();
node_stream.write((char *)&size_of_mapping, sizeof(unsigned)); node_stream.write((char *)&size_of_mapping, sizeof(unsigned));
if (size_of_mapping > 0) if (size_of_mapping > 0)
{ {
node_stream.write((char *)&(internal_to_external_node_map[0]), node_stream.write((char *) internal_to_external_node_map->data(),
size_of_mapping * sizeof(QueryNode)); size_of_mapping * sizeof(QueryNode));
} }
node_stream.close(); node_stream.close();
internal_to_external_node_map.clear();
internal_to_external_node_map.shrink_to_fit();
} }
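// Hedged reader sketch matching the layout written above (an unsigned count
// followed by `count` raw QueryNode records); the function name is illustrative:
//
// inline std::vector<QueryNode> readNodeMapping(std::istream &input)
// {
//     unsigned count = 0;
//     input.read(reinterpret_cast<char *>(&count), sizeof(unsigned));
//     std::vector<QueryNode> mapping(count);
//     if (count > 0)
//         input.read(reinterpret_cast<char *>(mapping.data()), count * sizeof(QueryNode));
//     return mapping;
// }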
/** /**
\brief Building rtree-based nearest-neighbor data structure \brief Building rtree-based nearest-neighbor data structure
Saves info to files: '.ramIndex' and '.fileIndex'. Saves tree into '.ramIndex' and leaves into '.fileIndex'.
*/ */
void Prepare::BuildRTree(std::vector<EdgeBasedNode> &node_based_edge_list) void Prepare::BuildRTree(const std::vector<EdgeBasedNode> &node_based_edge_list, const std::vector<QueryNode>& internal_to_external_node_map)
{ {
SimpleLogger().Write() << "building r-tree ..."; StaticRTree<EdgeBasedNode>(node_based_edge_list, config.rtree_nodes_output_path.c_str(),
StaticRTree<EdgeBasedNode>(node_based_edge_list, rtree_nodes_path.c_str(), config.rtree_leafs_output_path.c_str(), internal_to_external_node_map);
rtree_leafs_path.c_str(), internal_to_external_node_map);
} }


@ -28,11 +28,11 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef PROCESSING_CHAIN_HPP #ifndef PROCESSING_CHAIN_HPP
#define PROCESSING_CHAIN_HPP #define PROCESSING_CHAIN_HPP
#include "contractor_options.hpp"
#include "edge_based_graph_factory.hpp" #include "edge_based_graph_factory.hpp"
#include "../data_structures/query_edge.hpp" #include "../data_structures/query_edge.hpp"
#include "../data_structures/static_graph.hpp" #include "../data_structures/static_graph.hpp"
class FingerPrint;
struct EdgeBasedNode; struct EdgeBasedNode;
struct lua_State; struct lua_State;
@ -50,46 +50,36 @@ class Prepare
using InputEdge = DynamicGraph<EdgeData>::InputEdge; using InputEdge = DynamicGraph<EdgeData>::InputEdge;
using StaticEdge = StaticGraph<EdgeData>::InputEdge; using StaticEdge = StaticGraph<EdgeData>::InputEdge;
explicit Prepare(); explicit Prepare(const ContractorConfig& contractor_config)
: config(contractor_config) {}
Prepare(const Prepare &) = delete; Prepare(const Prepare &) = delete;
~Prepare(); ~Prepare();
int Process(int argc, char *argv[]); int Run();
protected: protected:
bool ParseArguments(int argc, char *argv[]); void SetupScriptingEnvironment(lua_State *myLuaState,
void CheckRestrictionsFile(FingerPrint &fingerprint_orig);
bool SetupScriptingEnvironment(lua_State *myLuaState,
EdgeBasedGraphFactory::SpeedProfileProperties &speed_profile); EdgeBasedGraphFactory::SpeedProfileProperties &speed_profile);
std::size_t BuildEdgeExpandedGraph(lua_State *myLuaState, std::shared_ptr<RestrictionMap> LoadRestrictionMap();
NodeID nodeBasedNodeNumber, unsigned CalculateEdgeChecksum(std::unique_ptr<std::vector<EdgeBasedNode>> node_based_edge_list);
std::vector<EdgeBasedNode> &nodeBasedEdgeList, void ContractGraph(const std::size_t number_of_edge_based_nodes,
DeallocatingVector<EdgeBasedEdge> &edgeBasedEdgeList, DeallocatingVector<EdgeBasedEdge>& edge_based_edge_list,
EdgeBasedGraphFactory::SpeedProfileProperties &speed_profile); DeallocatingVector<QueryEdge>& contracted_edge_list);
void WriteNodeMapping(); std::size_t WriteContractedGraph(unsigned number_of_edge_based_nodes,
void BuildRTree(std::vector<EdgeBasedNode> &node_based_edge_list); std::unique_ptr<std::vector<EdgeBasedNode>> node_based_edge_list,
std::unique_ptr<DeallocatingVector<QueryEdge>> contracted_edge_list);
std::shared_ptr<NodeBasedDynamicGraph> LoadNodeBasedGraph(std::vector<NodeID> &barrier_node_list,
std::vector<NodeID> &traffic_light_list,
std::vector<QueryNode>& internal_to_external_node_map);
std::pair<std::size_t, std::size_t>
BuildEdgeExpandedGraph(std::vector<QueryNode> &internal_to_external_node_map,
std::vector<EdgeBasedNode> &node_based_edge_list,
DeallocatingVector<EdgeBasedEdge> &edge_based_edge_list);
void WriteNodeMapping(std::unique_ptr<std::vector<QueryNode>> internal_to_external_node_map);
void BuildRTree(const std::vector<EdgeBasedNode> &node_based_edge_list,
const std::vector<QueryNode> &internal_to_external_node_map);
private: private:
std::vector<QueryNode> internal_to_external_node_map; ContractorConfig config;
std::vector<TurnRestriction> restriction_list;
std::vector<NodeID> barrier_node_list;
std::vector<NodeID> traffic_light_list;
std::vector<ImportEdge> edge_list;
unsigned requested_num_threads;
boost::filesystem::path config_file_path;
boost::filesystem::path input_path;
boost::filesystem::path restrictions_path;
boost::filesystem::path preinfo_path;
boost::filesystem::path profile_path;
std::string node_filename;
std::string edge_out;
std::string info_out;
std::string geometry_filename;
std::string graph_out;
std::string rtree_nodes_path;
std::string rtree_leafs_path;
}; };
#endif // PROCESSING_CHAIN_HPP #endif // PROCESSING_CHAIN_HPP


@ -85,6 +85,9 @@ template <typename EdgeDataT> class DynamicGraph
edge_list.resize(number_of_nodes); edge_list.resize(number_of_nodes);
} }
/**
* Constructs a DynamicGraph from a list of edges sorted by source node id.
*/
template <class ContainerT> DynamicGraph(const NodeIterator nodes, const ContainerT &graph) template <class ContainerT> DynamicGraph(const NodeIterator nodes, const ContainerT &graph)
{ {
number_of_nodes = nodes; number_of_nodes = nodes;


@ -47,6 +47,13 @@ bool NodeBasedEdge::operator<(const NodeBasedEdge &other) const
return source < other.source; return source < other.source;
} }
NodeBasedEdge::NodeBasedEdge()
: source(SPECIAL_NODEID), target(SPECIAL_NODEID), name_id(0), weight(0), forward(false),
backward(false), roundabout(false), in_tiny_cc(false),
access_restricted(false), is_split(false), travel_mode(false)
{
}
NodeBasedEdge::NodeBasedEdge(NodeID source, NodeBasedEdge::NodeBasedEdge(NodeID source,
NodeID target, NodeID target,
NodeID name_id, NodeID name_id,


@ -35,6 +35,7 @@ struct NodeBasedEdge
{ {
bool operator<(const NodeBasedEdge &e) const; bool operator<(const NodeBasedEdge &e) const;
NodeBasedEdge();
explicit NodeBasedEdge(NodeID source, explicit NodeBasedEdge(NodeID source,
NodeID target, NodeID target,
NodeID name_id, NodeID name_id,
@ -58,8 +59,6 @@ struct NodeBasedEdge
bool access_restricted : 1; bool access_restricted : 1;
bool is_split : 1; bool is_split : 1;
TravelMode travel_mode : 4; TravelMode travel_mode : 4;
NodeBasedEdge() = delete;
}; };
struct EdgeBasedEdge struct EdgeBasedEdge
@ -86,6 +85,4 @@ struct EdgeBasedEdge
bool backward : 1; bool backward : 1;
}; };
using ImportEdge = NodeBasedEdge;
#endif /* IMPORT_EDGE_HPP */ #endif /* IMPORT_EDGE_HPP */


@ -64,7 +64,7 @@ struct NodeBasedEdgeData
backward = temp_flag; backward = temp_flag;
} }
bool IsEqualTo(const NodeBasedEdgeData &other) const bool IsCompatibleTo(const NodeBasedEdgeData &other) const
{ {
return (forward == other.forward) && (backward == other.backward) && return (forward == other.forward) && (backward == other.backward) &&
(nameID == other.nameID) && (ignore_in_grid == other.ignore_in_grid) && (nameID == other.nameID) && (ignore_in_grid == other.ignore_in_grid) &&
@ -72,26 +72,123 @@ struct NodeBasedEdgeData
} }
}; };
struct SimpleEdgeData
{
SimpleEdgeData() : capacity(0) {}
EdgeWeight capacity;
};
using NodeBasedDynamicGraph = DynamicGraph<NodeBasedEdgeData>; using NodeBasedDynamicGraph = DynamicGraph<NodeBasedEdgeData>;
using SimpleNodeBasedDynamicGraph = DynamicGraph<SimpleEdgeData>;
// Factory method to create NodeBasedDynamicGraph from ImportEdges inline bool validateNeighborHood(const NodeBasedDynamicGraph& graph, const NodeID source)
{
for (auto edge = graph.BeginEdges(source); edge < graph.EndEdges(source); ++edge)
{
const auto& data = graph.GetEdgeData(edge);
if (!data.forward && !data.backward)
{
SimpleLogger().Write(logWARNING) << "Invalid edge directions";
return false;
}
auto target = graph.GetTarget(edge);
if (target == SPECIAL_NODEID)
{
SimpleLogger().Write(logWARNING) << "Invalid edge target";
return false;
}
bool found_reverse = false;
for (auto rev_edge = graph.BeginEdges(target); rev_edge < graph.EndEdges(target); ++rev_edge)
{
auto rev_target = graph.GetTarget(rev_edge);
if (rev_target == SPECIAL_NODEID)
{
SimpleLogger().Write(logWARNING) << "Invalid reverse edge target";
return false;
}
if (rev_target != source)
{
continue;
}
if (found_reverse)
{
SimpleLogger().Write(logWARNING) << "Found more than one reverse edge";
return false;
}
const auto& rev_data = graph.GetEdgeData(rev_edge);
// edge is incoming, this must be an outgoing edge
if (data.backward && !rev_data.forward)
{
SimpleLogger().Write(logWARNING) << "Found no outgoing edge to an incoming edge!";
return false;
}
// edge is bi-directional, reverse must be as well
if (data.forward && data.backward && (!rev_data.forward || !rev_data.backward))
{
SimpleLogger().Write(logWARNING) << "Found bi-directional edge that is not bi-directional to both ends";
return false;
}
found_reverse = true;
}
if (!found_reverse)
{
SimpleLogger().Write(logWARNING) << "Could not find reverse edge";
return false;
}
}
return true;
}
// This function checks if the overall graph is undirected (has an edge in each direction).
inline bool validateNodeBasedGraph(const NodeBasedDynamicGraph& graph)
{
for (auto source = 0u; source < graph.GetNumberOfNodes(); ++source)
{
if (!validateNeighborHood(graph, source))
{
return false;
}
}
return true;
}
// Factory method to create NodeBasedDynamicGraph from NodeBasedEdges
// Since DynamicGraph expects directed edges, we need to insert
// two edges for undirected edges.
inline std::shared_ptr<NodeBasedDynamicGraph> inline std::shared_ptr<NodeBasedDynamicGraph>
NodeBasedDynamicGraphFromImportEdges(int number_of_nodes, std::vector<ImportEdge> &input_edge_list) NodeBasedDynamicGraphFromImportEdges(int number_of_nodes, std::vector<NodeBasedEdge> &input_edge_list)
{ {
static_assert(sizeof(NodeBasedEdgeData) == 16, static_assert(sizeof(NodeBasedEdgeData) == 16,
"changing node based edge data size changes memory consumption"); "changing node based edge data size changes memory consumption");
DeallocatingVector<NodeBasedDynamicGraph::InputEdge> edges_list; DeallocatingVector<NodeBasedDynamicGraph::InputEdge> edges_list;
NodeBasedDynamicGraph::InputEdge edge; NodeBasedDynamicGraph::InputEdge edge;
for (const ImportEdge &import_edge : input_edge_list)
// Since DynamicGraph assumes directed edges we have to make sure we transformed
// the compressed edge format into single directed edges. We do this to make sure
// every node also knows its incoming edges, not only its outgoing ones, and we use the
// backward=true flag to indicate which is which.
//
// We do the transformation in the following way:
//
// if the edge (a, b) is split:
// 1. this edge must be in only one direction, so its a --> b
// 2. there must be another directed edge b --> a somewhere in the data
// if the edge (a, b) is not split:
// 1. this edge must be one of a --> b or a <-> b
// (a <-- b gets reduced to b --> a)
// 2. a --> b will be transformed to a --> b and b <-- a
// 3. a <-> b will be transformed to a <-> b and b <-> a (I think a --> b and b <-- a would work as well though)
for (const NodeBasedEdge &import_edge : input_edge_list)
{ {
// edges that are not forward get converted by flipping the end points
BOOST_ASSERT(import_edge.forward);
if (import_edge.forward) if (import_edge.forward)
{ {
edge.source = import_edge.source; edge.source = import_edge.source;
@ -99,20 +196,10 @@ NodeBasedDynamicGraphFromImportEdges(int number_of_nodes, std::vector<ImportEdge
edge.data.forward = import_edge.forward; edge.data.forward = import_edge.forward;
edge.data.backward = import_edge.backward; edge.data.backward = import_edge.backward;
} }
else
{
edge.source = import_edge.target;
edge.target = import_edge.source;
edge.data.backward = import_edge.forward;
edge.data.forward = import_edge.backward;
}
if (edge.source == edge.target) BOOST_ASSERT(edge.source != edge.target);
{
continue;
}
edge.data.distance = (std::max)(static_cast<int>(import_edge.weight), 1); edge.data.distance = static_cast<int>(import_edge.weight);
BOOST_ASSERT(edge.data.distance > 0); BOOST_ASSERT(edge.data.distance > 0);
edge.data.shortcut = false; edge.data.shortcut = false;
edge.data.roundabout = import_edge.roundabout; edge.data.roundabout = import_edge.roundabout;
@ -132,147 +219,16 @@ NodeBasedDynamicGraphFromImportEdges(int number_of_nodes, std::vector<ImportEdge
} }
} }
// remove duplicate edges
tbb::parallel_sort(edges_list.begin(), edges_list.end()); tbb::parallel_sort(edges_list.begin(), edges_list.end());
NodeID edge_count = 0;
for (NodeID i = 0; i < edges_list.size();)
{
const NodeID source = edges_list[i].source;
const NodeID target = edges_list[i].target;
// remove eigenloops
if (source == target)
{
i++;
continue;
}
NodeBasedDynamicGraph::InputEdge forward_edge;
NodeBasedDynamicGraph::InputEdge reverse_edge;
forward_edge = reverse_edge = edges_list[i];
forward_edge.data.forward = reverse_edge.data.backward = true;
forward_edge.data.backward = reverse_edge.data.forward = false;
forward_edge.data.shortcut = reverse_edge.data.shortcut = false;
forward_edge.data.distance = reverse_edge.data.distance = std::numeric_limits<int>::max();
// remove parallel edges
while (i < edges_list.size() && edges_list[i].source == source &&
edges_list[i].target == target)
{
if (edges_list[i].data.forward)
{
forward_edge.data.distance =
std::min(edges_list[i].data.distance, forward_edge.data.distance);
}
if (edges_list[i].data.backward)
{
reverse_edge.data.distance =
std::min(edges_list[i].data.distance, reverse_edge.data.distance);
}
++i;
}
// merge edges (s,t) and (t,s) into bidirectional edge
if (forward_edge.data.distance == reverse_edge.data.distance)
{
if (static_cast<int>(forward_edge.data.distance) != std::numeric_limits<int>::max())
{
forward_edge.data.backward = true;
edges_list[edge_count++] = forward_edge;
}
}
else
{ // insert separate edges
if (static_cast<int>(forward_edge.data.distance) != std::numeric_limits<int>::max())
{
edges_list[edge_count++] = forward_edge;
}
if (static_cast<int>(reverse_edge.data.distance) != std::numeric_limits<int>::max())
{
edges_list[edge_count++] = reverse_edge;
}
}
}
edges_list.resize(edge_count);
SimpleLogger().Write() << "merged " << edges_list.size() - edge_count << " edges out of "
<< edges_list.size();
auto graph = std::make_shared<NodeBasedDynamicGraph>( auto graph = std::make_shared<NodeBasedDynamicGraph>(
static_cast<NodeBasedDynamicGraph::NodeIterator>(number_of_nodes), edges_list); static_cast<NodeBasedDynamicGraph::NodeIterator>(number_of_nodes), edges_list);
return graph;
}
template <class SimpleEdgeT>
inline std::shared_ptr<SimpleNodeBasedDynamicGraph>
SimpleNodeBasedDynamicGraphFromEdges(int number_of_nodes, std::vector<SimpleEdgeT> &input_edge_list)
{
static_assert(sizeof(NodeBasedEdgeData) == 16,
"changing node based edge data size changes memory consumption");
tbb::parallel_sort(input_edge_list.begin(), input_edge_list.end());
DeallocatingVector<SimpleNodeBasedDynamicGraph::InputEdge> edges_list; #ifndef NDEBUG
SimpleNodeBasedDynamicGraph::InputEdge edge; BOOST_ASSERT(validateNodeBasedGraph(*graph));
edge.data.capacity = 1; #endif
for (const SimpleEdgeT &import_edge : input_edge_list)
{
if (import_edge.source == import_edge.target)
{
continue;
}
edge.source = import_edge.source;
edge.target = import_edge.target;
edges_list.push_back(edge);
std::swap(edge.source, edge.target);
edges_list.push_back(edge);
}
// remove duplicate edges
tbb::parallel_sort(edges_list.begin(), edges_list.end());
NodeID edge_count = 0;
for (NodeID i = 0; i < edges_list.size();)
{
const NodeID source = edges_list[i].source;
const NodeID target = edges_list[i].target;
// remove eigenloops
if (source == target)
{
i++;
continue;
}
SimpleNodeBasedDynamicGraph::InputEdge forward_edge;
SimpleNodeBasedDynamicGraph::InputEdge reverse_edge;
forward_edge = reverse_edge = edges_list[i];
forward_edge.data.capacity = reverse_edge.data.capacity = INVALID_EDGE_WEIGHT;
// remove parallel edges
while (i < edges_list.size() && edges_list[i].source == source &&
edges_list[i].target == target)
{
forward_edge.data.capacity =
std::min(edges_list[i].data.capacity, forward_edge.data.capacity);
reverse_edge.data.capacity =
std::min(edges_list[i].data.capacity, reverse_edge.data.capacity);
++i;
}
// merge edges (s,t) and (t,s) into bidirectional edge
if (forward_edge.data.capacity == reverse_edge.data.capacity)
{
if (static_cast<int>(forward_edge.data.capacity) != INVALID_EDGE_WEIGHT)
{
edges_list[edge_count++] = forward_edge;
}
}
else
{ // insert separate edges
if (static_cast<int>(forward_edge.data.capacity) != INVALID_EDGE_WEIGHT)
{
edges_list[edge_count++] = forward_edge;
}
if (static_cast<int>(reverse_edge.data.capacity) != INVALID_EDGE_WEIGHT)
{
edges_list[edge_count++] = reverse_edge;
}
}
}
SimpleLogger().Write() << "merged " << edges_list.size() - edge_count << " edges out of "
<< edges_list.size();
auto graph = std::make_shared<SimpleNodeBasedDynamicGraph>(number_of_nodes, edges_list);
return graph; return graph;
} }


@ -91,6 +91,7 @@ template <> struct hash<RestrictionTarget>
class RestrictionMap class RestrictionMap
{ {
public: public:
RestrictionMap() : m_count(0) {};
RestrictionMap(const std::vector<TurnRestriction> &restriction_list); RestrictionMap(const std::vector<TurnRestriction> &restriction_list);
// Replace end v with w in each turn restriction containing u as via node // Replace end v with w in each turn restriction containing u as via node
@ -98,7 +99,7 @@ class RestrictionMap
void FixupArrivingTurnRestriction(const NodeID node_u, void FixupArrivingTurnRestriction(const NodeID node_u,
const NodeID node_v, const NodeID node_v,
const NodeID node_w, const NodeID node_w,
const std::shared_ptr<GraphT> &graph) const GraphT &graph)
{ {
BOOST_ASSERT(node_u != SPECIAL_NODEID); BOOST_ASSERT(node_u != SPECIAL_NODEID);
BOOST_ASSERT(node_v != SPECIAL_NODEID); BOOST_ASSERT(node_v != SPECIAL_NODEID);
@ -112,9 +113,9 @@ class RestrictionMap
// find all potential start edges. It is more efficient to get a (small) list // find all potential start edges. It is more efficient to get a (small) list
// of potential start edges than iterating over all buckets // of potential start edges than iterating over all buckets
std::vector<NodeID> predecessors; std::vector<NodeID> predecessors;
for (const EdgeID current_edge_id : graph->GetAdjacentEdgeRange(node_u)) for (const EdgeID current_edge_id : graph.GetAdjacentEdgeRange(node_u))
{ {
const NodeID target = graph->GetTarget(current_edge_id); const NodeID target = graph.GetTarget(current_edge_id);
if (node_v != target) if (node_v != target)
{ {
predecessors.push_back(target); predecessors.push_back(target);


@ -31,6 +31,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <osrm/route_parameters.hpp> #include <osrm/route_parameters.hpp>
#include "../algorithms/polyline_compressor.hpp"
RouteParameters::RouteParameters() RouteParameters::RouteParameters()
: zoom_level(18), print_instructions(false), alternate_route(true), geometry(true), : zoom_level(18), print_instructions(false), alternate_route(true), geometry(true),
compression(true), deprecatedAPI(false), uturn_default(false), classify(false), compression(true), deprecatedAPI(false), uturn_default(false), classify(false),
@ -131,3 +133,9 @@ void RouteParameters::addCoordinate(
static_cast<int>(COORDINATE_PRECISION * boost::fusion::at_c<0>(received_coordinates)), static_cast<int>(COORDINATE_PRECISION * boost::fusion::at_c<0>(received_coordinates)),
static_cast<int>(COORDINATE_PRECISION * boost::fusion::at_c<1>(received_coordinates))); static_cast<int>(COORDINATE_PRECISION * boost::fusion::at_c<1>(received_coordinates)));
} }
void RouteParameters::getCoordinatesFromGeometry(const std::string geometry_string)
{
PolylineCompressor pc;
coordinates = pc.decode_string(geometry_string);
}
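// Hedged usage sketch: with this helper a client can hand over an encoded
// polyline instead of individual coordinates (the literal below is only an
// illustrative encoded string, not a real route):
//
// RouteParameters params;
// params.getCoordinatesFromGeometry("_p~iF~ps|U_ulLnnqC");
// // params.coordinates now holds the decoded FixedPointCoordinate list.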


@ -344,7 +344,7 @@ class StaticRTree
StaticRTree(const StaticRTree &) = delete; StaticRTree(const StaticRTree &) = delete;
// Construct a packed Hilbert-R-Tree with Kamel-Faloutsos algorithm [1] // Construct a packed Hilbert-R-Tree with Kamel-Faloutsos algorithm [1]
explicit StaticRTree(std::vector<EdgeDataT> &input_data_vector, explicit StaticRTree(const std::vector<EdgeDataT> &input_data_vector,
const std::string tree_node_filename, const std::string tree_node_filename,
const std::string leaf_node_filename, const std::string leaf_node_filename,
const std::vector<QueryNode> &coordinate_list) const std::vector<QueryNode> &coordinate_list)
@ -646,22 +646,16 @@ class StaticRTree
const FixedPointCoordinate &input_coordinate, const FixedPointCoordinate &input_coordinate,
std::vector<PhantomNode> &result_phantom_node_vector, std::vector<PhantomNode> &result_phantom_node_vector,
const unsigned max_number_of_phantom_nodes, const unsigned max_number_of_phantom_nodes,
const unsigned max_checked_elements = 4 * LEAF_NODE_SIZE) const float max_distance = 1100)
{ {
unsigned inspected_elements = 0; unsigned inspected_elements = 0;
unsigned number_of_elements_from_big_cc = 0; unsigned number_of_elements_from_big_cc = 0;
unsigned number_of_elements_from_tiny_cc = 0; unsigned number_of_elements_from_tiny_cc = 0;
#ifdef NDEBUG
unsigned pruned_elements = 0;
#endif
std::pair<double, double> projected_coordinate = { std::pair<double, double> projected_coordinate = {
mercator::lat2y(input_coordinate.lat / COORDINATE_PRECISION), mercator::lat2y(input_coordinate.lat / COORDINATE_PRECISION),
input_coordinate.lon / COORDINATE_PRECISION}; input_coordinate.lon / COORDINATE_PRECISION};
// upper bound pruning technique
upper_bound<float> pruning_bound(max_number_of_phantom_nodes);
// initialize queue with root element // initialize queue with root element
std::priority_queue<IncrementalQueryCandidate> traversal_queue; std::priority_queue<IncrementalQueryCandidate> traversal_queue;
traversal_queue.emplace(0.f, m_search_tree[0]); traversal_queue.emplace(0.f, m_search_tree[0]);
@ -669,6 +663,10 @@ class StaticRTree
while (!traversal_queue.empty()) while (!traversal_queue.empty())
{ {
const IncrementalQueryCandidate current_query_node = traversal_queue.top(); const IncrementalQueryCandidate current_query_node = traversal_queue.top();
if (current_query_node.min_dist > max_distance)
{
break;
}
traversal_queue.pop(); traversal_queue.pop();
if (current_query_node.node.template is<TreeNode>()) if (current_query_node.node.template is<TreeNode>())
@ -692,18 +690,7 @@ class StaticRTree
// distance must be non-negative // distance must be non-negative
BOOST_ASSERT(0.f <= current_perpendicular_distance); BOOST_ASSERT(0.f <= current_perpendicular_distance);
if (pruning_bound.get() >= current_perpendicular_distance || traversal_queue.emplace(current_perpendicular_distance, current_edge);
current_edge.is_in_tiny_cc())
{
pruning_bound.insert(current_perpendicular_distance);
traversal_queue.emplace(current_perpendicular_distance, current_edge);
}
#ifdef NDEBUG
else
{
++pruned_elements;
}
#endif
} }
} }
else else
@ -771,9 +758,8 @@ class StaticRTree
} }
// stop the search by flushing the queue // stop the search by flushing the queue
if ((result_phantom_node_vector.size() >= max_number_of_phantom_nodes && if (result_phantom_node_vector.size() >= max_number_of_phantom_nodes &&
number_of_elements_from_big_cc > 0) || number_of_elements_from_big_cc > 0)
inspected_elements >= max_checked_elements)
{ {
traversal_queue = std::priority_queue<IncrementalQueryCandidate>{}; traversal_queue = std::priority_queue<IncrementalQueryCandidate>{};
} }
@ -809,6 +795,10 @@ class StaticRTree
unsigned number_of_elements_from_big_cc = 0; unsigned number_of_elements_from_big_cc = 0;
unsigned number_of_elements_from_tiny_cc = 0; unsigned number_of_elements_from_tiny_cc = 0;
// is true if a big cc was added to the queue so we also have a lower bound
// for them. It activates pruning for big components.
bool has_big_cc = false;
unsigned pruned_elements = 0; unsigned pruned_elements = 0;
std::pair<double, double> projected_coordinate = { std::pair<double, double> projected_coordinate = {
@ -849,10 +839,11 @@ class StaticRTree
BOOST_ASSERT(0.f <= current_perpendicular_distance); BOOST_ASSERT(0.f <= current_perpendicular_distance);
if (pruning_bound.get() >= current_perpendicular_distance || if (pruning_bound.get() >= current_perpendicular_distance ||
current_edge.is_in_tiny_cc()) (!has_big_cc && !current_edge.is_in_tiny_cc()))
{ {
pruning_bound.insert(current_perpendicular_distance); pruning_bound.insert(current_perpendicular_distance);
traversal_queue.emplace(current_perpendicular_distance, current_edge); traversal_queue.emplace(current_perpendicular_distance, current_edge);
has_big_cc = has_big_cc || !current_edge.is_in_tiny_cc();
} }
else else
{ {


@ -264,9 +264,10 @@ int main(const int argc, const char *argv[])
boost::filesystem::ifstream hsgr_input_stream(hsgr_path, std::ios::binary); boost::filesystem::ifstream hsgr_input_stream(hsgr_path, std::ios::binary);
FingerPrint fingerprint_loaded, fingerprint_orig; FingerPrint fingerprint_valid = FingerPrint::GetValid();
FingerPrint fingerprint_loaded;
hsgr_input_stream.read((char *)&fingerprint_loaded, sizeof(FingerPrint)); hsgr_input_stream.read((char *)&fingerprint_loaded, sizeof(FingerPrint));
if (fingerprint_loaded.TestGraphUtil(fingerprint_orig)) if (fingerprint_loaded.TestGraphUtil(fingerprint_valid))
{ {
SimpleLogger().Write(logDEBUG) << "Fingerprint checked out ok"; SimpleLogger().Write(logDEBUG) << "Fingerprint checked out ok";
} }


@ -173,6 +173,7 @@ void DescriptionFactory::Run(const unsigned zoom_level)
// } // }
// string0 = string1; // string0 = string1;
// } // }
//
float segment_length = 0.; float segment_length = 0.;
EdgeWeight segment_duration = 0; EdgeWeight segment_duration = 0;
@ -197,7 +198,8 @@ void DescriptionFactory::Run(const unsigned zoom_level)
// Post-processing to remove empty or nearly empty path segments // Post-processing to remove empty or nearly empty path segments
if (path_description.size() > 2 && if (path_description.size() > 2 &&
std::numeric_limits<float>::epsilon() > path_description.back().length) std::numeric_limits<float>::epsilon() > path_description.back().length &&
!(path_description.end() - 2)->is_via_location)
{ {
path_description.pop_back(); path_description.pop_back();
path_description.back().necessary = true; path_description.back().necessary = true;
@ -206,7 +208,8 @@ void DescriptionFactory::Run(const unsigned zoom_level)
} }
if (path_description.size() > 2 && if (path_description.size() > 2 &&
std::numeric_limits<float>::epsilon() > path_description.front().length) std::numeric_limits<float>::epsilon() > path_description.front().length &&
!(path_description.begin() + 1)->is_via_location)
{ {
path_description.erase(path_description.begin()); path_description.erase(path_description.begin());
path_description.front().turn_instruction = TurnInstruction::HeadOn; path_description.front().turn_instruction = TurnInstruction::HeadOn;


@ -73,7 +73,7 @@ int main(int argc, char *argv[])
<< " not found!"; << " not found!";
return 1; return 1;
} }
return extractor().run(extractor_config); return extractor(extractor_config).run();
} }
catch (const std::exception &e) catch (const std::exception &e)
{ {


@ -35,6 +35,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../util/osrm_exception.hpp" #include "../util/osrm_exception.hpp"
#include "../util/simple_logger.hpp" #include "../util/simple_logger.hpp"
#include "../util/timing_util.hpp" #include "../util/timing_util.hpp"
#include "../util/fingerprint.hpp"
#include <boost/assert.hpp> #include <boost/assert.hpp>
#include <boost/filesystem.hpp> #include <boost/filesystem.hpp>
@ -54,6 +55,7 @@ ExtractionContainers::ExtractionContainers()
ExtractionContainers::~ExtractionContainers() ExtractionContainers::~ExtractionContainers()
{ {
// FIXME isn't this done implicitly when the stxxl::vectors go out of scope?
used_node_id_list.clear(); used_node_id_list.clear();
all_nodes_list.clear(); all_nodes_list.clear();
all_edges_list.clear(); all_edges_list.clear();
@ -71,394 +73,561 @@ ExtractionContainers::~ExtractionContainers()
* - filter nodes list to nodes that are referenced by ways * - filter nodes list to nodes that are referenced by ways
* - merge edges with nodes to include location of start/end points and serialize * - merge edges with nodes to include location of start/end points and serialize
* *
* FIXME: Each of these steps should be its own function for readability.
*/ */
void ExtractionContainers::PrepareData(const std::string &output_file_name, void ExtractionContainers::PrepareData(const std::string &output_file_name,
const std::string &restrictions_file_name) const std::string &restrictions_file_name,
const std::string &name_file_name)
{ {
try try
{ {
unsigned number_of_used_nodes = 0;
unsigned number_of_used_edges = 0;
std::cout << "[extractor] Sorting used nodes ... " << std::flush;
TIMER_START(sorting_used_nodes);
stxxl::sort(used_node_id_list.begin(), used_node_id_list.end(), Cmp(), stxxl_memory);
TIMER_STOP(sorting_used_nodes);
std::cout << "ok, after " << TIMER_SEC(sorting_used_nodes) << "s" << std::endl;
std::cout << "[extractor] Erasing duplicate nodes ... " << std::flush;
TIMER_START(erasing_dups);
auto new_end = std::unique(used_node_id_list.begin(), used_node_id_list.end());
used_node_id_list.resize(new_end - used_node_id_list.begin());
TIMER_STOP(erasing_dups);
std::cout << "ok, after " << TIMER_SEC(erasing_dups) << "s" << std::endl;
std::cout << "[extractor] Sorting all nodes ... " << std::flush;
TIMER_START(sorting_nodes);
stxxl::sort(all_nodes_list.begin(), all_nodes_list.end(), ExternalMemoryNodeSTXXLCompare(),
stxxl_memory);
TIMER_STOP(sorting_nodes);
std::cout << "ok, after " << TIMER_SEC(sorting_nodes) << "s" << std::endl;
std::cout << "[extractor] Sorting used ways ... " << std::flush;
TIMER_START(sort_ways);
stxxl::sort(way_start_end_id_list.begin(), way_start_end_id_list.end(),
FirstAndLastSegmentOfWayStxxlCompare(), stxxl_memory);
TIMER_STOP(sort_ways);
std::cout << "ok, after " << TIMER_SEC(sort_ways) << "s" << std::endl;
std::cout << "[extractor] Sorting " << restrictions_list.size()
<< " restrictions. by from... " << std::flush;
TIMER_START(sort_restrictions);
stxxl::sort(restrictions_list.begin(), restrictions_list.end(),
CmpRestrictionContainerByFrom(), stxxl_memory);
TIMER_STOP(sort_restrictions);
std::cout << "ok, after " << TIMER_SEC(sort_restrictions) << "s" << std::endl;
std::cout << "[extractor] Fixing restriction starts ... " << std::flush;
TIMER_START(fix_restriction_starts);
auto restrictions_iterator = restrictions_list.begin();
auto way_start_and_end_iterator = way_start_end_id_list.cbegin();
while (way_start_and_end_iterator != way_start_end_id_list.cend() &&
restrictions_iterator != restrictions_list.end())
{
if (way_start_and_end_iterator->way_id < restrictions_iterator->restriction.from.way)
{
++way_start_and_end_iterator;
continue;
}
if (way_start_and_end_iterator->way_id > restrictions_iterator->restriction.from.way)
{
++restrictions_iterator;
continue;
}
BOOST_ASSERT(way_start_and_end_iterator->way_id ==
restrictions_iterator->restriction.from.way);
const NodeID via_node_id = restrictions_iterator->restriction.via.node;
if (way_start_and_end_iterator->first_segment_source_id == via_node_id)
{
restrictions_iterator->restriction.from.node =
way_start_and_end_iterator->first_segment_target_id;
}
else if (way_start_and_end_iterator->last_segment_target_id == via_node_id)
{
restrictions_iterator->restriction.from.node =
way_start_and_end_iterator->last_segment_source_id;
}
++restrictions_iterator;
}
TIMER_STOP(fix_restriction_starts);
std::cout << "ok, after " << TIMER_SEC(fix_restriction_starts) << "s" << std::endl;
std::cout << "[extractor] Sorting restrictions. by to ... " << std::flush;
TIMER_START(sort_restrictions_to);
stxxl::sort(restrictions_list.begin(), restrictions_list.end(),
CmpRestrictionContainerByTo(), stxxl_memory);
TIMER_STOP(sort_restrictions_to);
std::cout << "ok, after " << TIMER_SEC(sort_restrictions_to) << "s" << std::endl;
std::cout << "[extractor] Fixing restriction ends ... " << std::flush;
TIMER_START(fix_restriction_ends);
restrictions_iterator = restrictions_list.begin();
way_start_and_end_iterator = way_start_end_id_list.cbegin();
while (way_start_and_end_iterator != way_start_end_id_list.cend() &&
restrictions_iterator != restrictions_list.end())
{
if (way_start_and_end_iterator->way_id < restrictions_iterator->restriction.to.way)
{
++way_start_and_end_iterator;
continue;
}
if (way_start_and_end_iterator->way_id > restrictions_iterator->restriction.to.way)
{
++restrictions_iterator;
continue;
}
BOOST_ASSERT(way_start_and_end_iterator->way_id ==
restrictions_iterator->restriction.to.way);
const NodeID via_node_id = restrictions_iterator->restriction.via.node;
if (way_start_and_end_iterator->first_segment_source_id == via_node_id)
{
restrictions_iterator->restriction.to.node =
way_start_and_end_iterator->first_segment_target_id;
}
else if (way_start_and_end_iterator->last_segment_target_id == via_node_id)
{
restrictions_iterator->restriction.to.node =
way_start_and_end_iterator->last_segment_source_id;
}
++restrictions_iterator;
}
TIMER_STOP(fix_restriction_ends);
std::cout << "ok, after " << TIMER_SEC(fix_restriction_ends) << "s" << std::endl;
// serialize restrictions
std::ofstream restrictions_out_stream;
unsigned written_restriction_count = 0;
restrictions_out_stream.open(restrictions_file_name.c_str(), std::ios::binary);
restrictions_out_stream.write((char *)&fingerprint, sizeof(FingerPrint));
const auto count_position = restrictions_out_stream.tellp();
restrictions_out_stream.write((char *)&written_restriction_count, sizeof(unsigned));
for (const auto &restriction_container : restrictions_list)
{
if (SPECIAL_NODEID != restriction_container.restriction.from.node &&
SPECIAL_NODEID != restriction_container.restriction.to.node)
{
restrictions_out_stream.write((char *)&(restriction_container.restriction),
sizeof(TurnRestriction));
++written_restriction_count;
}
}
restrictions_out_stream.seekp(count_position);
restrictions_out_stream.write((char *)&written_restriction_count, sizeof(unsigned));
restrictions_out_stream.close();
SimpleLogger().Write() << "usable restrictions: " << written_restriction_count;
std::ofstream file_out_stream; std::ofstream file_out_stream;
file_out_stream.open(output_file_name.c_str(), std::ios::binary); file_out_stream.open(output_file_name.c_str(), std::ios::binary);
const FingerPrint fingerprint = FingerPrint::GetValid();
file_out_stream.write((char *)&fingerprint, sizeof(FingerPrint)); file_out_stream.write((char *)&fingerprint, sizeof(FingerPrint));
file_out_stream.write((char *)&number_of_used_nodes, sizeof(unsigned));
std::cout << "[extractor] Confirming/Writing used nodes ... " << std::flush;
TIMER_START(write_nodes);
// identify all used nodes by a merging step of two sorted lists
auto node_iterator = all_nodes_list.begin();
auto node_id_iterator = used_node_id_list.begin();
while (node_id_iterator != used_node_id_list.end() && node_iterator != all_nodes_list.end())
{
if (*node_id_iterator < node_iterator->node_id)
{
++node_id_iterator;
continue;
}
if (*node_id_iterator > node_iterator->node_id)
{
++node_iterator;
continue;
}
BOOST_ASSERT(*node_id_iterator == node_iterator->node_id);
file_out_stream.write((char *)&(*node_iterator), sizeof(ExternalMemoryNode)); PrepareNodes();
WriteNodes(file_out_stream);
PrepareEdges();
WriteEdges(file_out_stream);
++number_of_used_nodes;
++node_id_iterator;
++node_iterator;
}
TIMER_STOP(write_nodes);
std::cout << "ok, after " << TIMER_SEC(write_nodes) << "s" << std::endl;
std::cout << "[extractor] setting number of nodes ... " << std::flush;
std::ios::pos_type previous_file_position = file_out_stream.tellp();
file_out_stream.seekp(std::ios::beg + sizeof(FingerPrint));
file_out_stream.write((char *)&number_of_used_nodes, sizeof(unsigned));
file_out_stream.seekp(previous_file_position);
std::cout << "ok" << std::endl;
// Sort edges by start.
std::cout << "[extractor] Sorting edges by start ... " << std::flush;
TIMER_START(sort_edges_by_start);
stxxl::sort(all_edges_list.begin(), all_edges_list.end(), CmpEdgeByStartID(), stxxl_memory);
TIMER_STOP(sort_edges_by_start);
std::cout << "ok, after " << TIMER_SEC(sort_edges_by_start) << "s" << std::endl;
std::cout << "[extractor] Setting start coords ... " << std::flush;
TIMER_START(set_start_coords);
file_out_stream.write((char *)&number_of_used_edges, sizeof(unsigned));
// Traverse list of edges and nodes in parallel and set start coord
node_iterator = all_nodes_list.begin();
auto edge_iterator = all_edges_list.begin();
while (edge_iterator != all_edges_list.end() && node_iterator != all_nodes_list.end())
{
if (edge_iterator->start < node_iterator->node_id)
{
++edge_iterator;
continue;
}
if (edge_iterator->start > node_iterator->node_id)
{
node_iterator++;
continue;
}
BOOST_ASSERT(edge_iterator->start == node_iterator->node_id);
edge_iterator->source_coordinate.lat = node_iterator->lat;
edge_iterator->source_coordinate.lon = node_iterator->lon;
++edge_iterator;
}
TIMER_STOP(set_start_coords);
std::cout << "ok, after " << TIMER_SEC(set_start_coords) << "s" << std::endl;
// Sort Edges by target
std::cout << "[extractor] Sorting edges by target ... " << std::flush;
TIMER_START(sort_edges_by_target);
stxxl::sort(all_edges_list.begin(), all_edges_list.end(), CmpEdgeByTargetID(),
stxxl_memory);
TIMER_STOP(sort_edges_by_target);
std::cout << "ok, after " << TIMER_SEC(sort_edges_by_target) << "s" << std::endl;
std::cout << "[extractor] Setting target coords ... " << std::flush;
TIMER_START(set_target_coords);
// Traverse list of edges and nodes in parallel and set target coord
node_iterator = all_nodes_list.begin();
edge_iterator = all_edges_list.begin();
// Also serializes the edges
while (edge_iterator != all_edges_list.end() && node_iterator != all_nodes_list.end())
{
if (edge_iterator->target < node_iterator->node_id)
{
++edge_iterator;
continue;
}
if (edge_iterator->target > node_iterator->node_id)
{
++node_iterator;
continue;
}
BOOST_ASSERT(edge_iterator->target == node_iterator->node_id);
if (edge_iterator->source_coordinate.lat != std::numeric_limits<int>::min() &&
edge_iterator->source_coordinate.lon != std::numeric_limits<int>::min())
{
BOOST_ASSERT(edge_iterator->speed != -1);
edge_iterator->target_coordinate.lat = node_iterator->lat;
edge_iterator->target_coordinate.lon = node_iterator->lon;
const double distance = coordinate_calculation::euclidean_distance(
edge_iterator->source_coordinate.lat, edge_iterator->source_coordinate.lon,
node_iterator->lat, node_iterator->lon);
const double weight = (distance * 10.) / (edge_iterator->speed / 3.6);
int integer_weight = std::max(
1, (int)std::floor(
(edge_iterator->is_duration_set ? edge_iterator->speed : weight) + .5));
// FIXME: This means we have a _minimum_ edge length of 1m
// maybe use dm as base unit?
const int integer_distance = std::max(1, (int)distance);
const short zero = 0;
const short one = 1;
const bool yes = true;
const bool no = false;
file_out_stream.write((char *)&edge_iterator->start, sizeof(unsigned));
file_out_stream.write((char *)&edge_iterator->target, sizeof(unsigned));
file_out_stream.write((char *)&integer_distance, sizeof(int));
switch (edge_iterator->direction)
{
case ExtractionWay::notSure:
file_out_stream.write((char *)&zero, sizeof(short));
break;
case ExtractionWay::oneway:
file_out_stream.write((char *)&one, sizeof(short));
break;
case ExtractionWay::bidirectional:
file_out_stream.write((char *)&zero, sizeof(short));
break;
case ExtractionWay::opposite:
file_out_stream.write((char *)&one, sizeof(short));
break;
default:
throw osrm::exception("edge has broken direction");
}
file_out_stream.write((char *)&integer_weight, sizeof(int));
file_out_stream.write((char *)&edge_iterator->name_id, sizeof(unsigned));
if (edge_iterator->is_roundabout)
{
file_out_stream.write((char *)&yes, sizeof(bool));
}
else
{
file_out_stream.write((char *)&no, sizeof(bool));
}
if (edge_iterator->is_in_tiny_cc)
{
file_out_stream.write((char *)&yes, sizeof(bool));
}
else
{
file_out_stream.write((char *)&no, sizeof(bool));
}
if (edge_iterator->is_access_restricted)
{
file_out_stream.write((char *)&yes, sizeof(bool));
}
else
{
file_out_stream.write((char *)&no, sizeof(bool));
}
// cannot take address of bit field, so use local
const TravelMode travel_mode = edge_iterator->travel_mode;
file_out_stream.write((char *)&travel_mode, sizeof(TravelMode));
if (edge_iterator->is_split)
{
file_out_stream.write((char *)&yes, sizeof(bool));
}
else
{
file_out_stream.write((char *)&no, sizeof(bool));
}
++number_of_used_edges;
}
++edge_iterator;
}
TIMER_STOP(set_target_coords);
std::cout << "ok, after " << TIMER_SEC(set_target_coords) << "s" << std::endl;
std::cout << "[extractor] setting number of edges ... " << std::flush;
file_out_stream.seekp(previous_file_position);
file_out_stream.write((char *)&number_of_used_edges, sizeof(unsigned));
file_out_stream.close(); file_out_stream.close();
std::cout << "ok" << std::endl;
std::cout << "[extractor] writing street name index ... " << std::flush; PrepareRestrictions();
TIMER_START(write_name_index); WriteRestrictions(restrictions_file_name);
std::string name_file_streamName = (output_file_name + ".names");
boost::filesystem::ofstream name_file_stream(name_file_streamName, std::ios::binary);
unsigned total_length = 0; WriteNames(name_file_name);
std::vector<unsigned> name_lengths;
for (const std::string &temp_string : name_list)
{
const unsigned string_length =
std::min(static_cast<unsigned>(temp_string.length()), 255u);
name_lengths.push_back(string_length);
total_length += string_length;
}
RangeTable<> table(name_lengths);
name_file_stream << table;
name_file_stream.write((char *)&total_length, sizeof(unsigned));
// write all chars consecutively
for (const std::string &temp_string : name_list)
{
const unsigned string_length =
std::min(static_cast<unsigned>(temp_string.length()), 255u);
name_file_stream.write(temp_string.c_str(), string_length);
}
name_file_stream.close();
TIMER_STOP(write_name_index);
std::cout << "ok, after " << TIMER_SEC(write_name_index) << "s" << std::endl;
SimpleLogger().Write() << "Processed " << number_of_used_nodes << " nodes and "
<< number_of_used_edges << " edges";
} }
catch (const std::exception &e) catch (const std::exception &e)
{ {
std::cerr << "Caught Execption:" << e.what() << std::endl; std::cerr << "Caught Execption:" << e.what() << std::endl;
} }
} }
void ExtractionContainers::WriteNames(const std::string& names_file_name) const
{
std::cout << "[extractor] writing street name index ... " << std::flush;
TIMER_START(write_name_index);
boost::filesystem::ofstream name_file_stream(names_file_name, std::ios::binary);
unsigned total_length = 0;
std::vector<unsigned> name_lengths;
for (const std::string &temp_string : name_list)
{
const unsigned string_length =
std::min(static_cast<unsigned>(temp_string.length()), 255u);
name_lengths.push_back(string_length);
total_length += string_length;
}
// builds and writes the index
RangeTable<> name_index_range(name_lengths);
name_file_stream << name_index_range;
name_file_stream.write((char *)&total_length, sizeof(unsigned));
// write all chars consecutively
for (const std::string &temp_string : name_list)
{
const unsigned string_length =
std::min(static_cast<unsigned>(temp_string.length()), 255u);
name_file_stream.write(temp_string.c_str(), string_length);
}
name_file_stream.close();
TIMER_STOP(write_name_index);
std::cout << "ok, after " << TIMER_SEC(write_name_index) << "s" << std::endl;
}
void ExtractionContainers::PrepareNodes()
{
std::cout << "[extractor] Sorting used nodes ... " << std::flush;
TIMER_START(sorting_used_nodes);
stxxl::sort(used_node_id_list.begin(), used_node_id_list.end(), Cmp(), stxxl_memory);
TIMER_STOP(sorting_used_nodes);
std::cout << "ok, after " << TIMER_SEC(sorting_used_nodes) << "s" << std::endl;
std::cout << "[extractor] Erasing duplicate nodes ... " << std::flush;
TIMER_START(erasing_dups);
auto new_end = std::unique(used_node_id_list.begin(), used_node_id_list.end());
used_node_id_list.resize(new_end - used_node_id_list.begin());
TIMER_STOP(erasing_dups);
std::cout << "ok, after " << TIMER_SEC(erasing_dups) << "s" << std::endl;
std::cout << "[extractor] Building node id map ... " << std::flush;
TIMER_START(id_map);
external_to_internal_node_id_map.reserve(used_node_id_list.size());
for (NodeID i = 0u; i < used_node_id_list.size(); ++i)
external_to_internal_node_id_map[used_node_id_list[i]] = i;
TIMER_STOP(id_map);
std::cout << "ok, after " << TIMER_SEC(id_map) << "s" << std::endl;
std::cout << "[extractor] Sorting all nodes ... " << std::flush;
TIMER_START(sorting_nodes);
stxxl::sort(all_nodes_list.begin(), all_nodes_list.end(), ExternalMemoryNodeSTXXLCompare(),
stxxl_memory);
TIMER_STOP(sorting_nodes);
std::cout << "ok, after " << TIMER_SEC(sorting_nodes) << "s" << std::endl;
}
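// Illustrative example of the renumbering built above, assuming the id map
// behaves like an unordered_map<NodeID, NodeID>: if the de-duplicated, sorted
// used_node_id_list is {12, 57, 103}, the map ends up as 12 -> 0, 57 -> 1,
// 103 -> 2, and PrepareEdges() below rewrites every edge end point to these
// dense internal ids.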
void ExtractionContainers::PrepareEdges()
{
// Sort edges by start.
std::cout << "[extractor] Sorting edges by start ... " << std::flush;
TIMER_START(sort_edges_by_start);
stxxl::sort(all_edges_list.begin(), all_edges_list.end(), CmpEdgeByStartID(), stxxl_memory);
TIMER_STOP(sort_edges_by_start);
std::cout << "ok, after " << TIMER_SEC(sort_edges_by_start) << "s" << std::endl;
std::cout << "[extractor] Setting start coords ... " << std::flush;
TIMER_START(set_start_coords);
// Traverse list of edges and nodes in parallel and set start coord
auto node_iterator = all_nodes_list.begin();
auto edge_iterator = all_edges_list.begin();
while (edge_iterator != all_edges_list.end() && node_iterator != all_nodes_list.end())
{
if (edge_iterator->result.source < node_iterator->node_id)
{
edge_iterator->result.source = SPECIAL_NODEID;
++edge_iterator;
continue;
}
if (edge_iterator->result.source > node_iterator->node_id)
{
node_iterator++;
continue;
}
// remove loops
if (edge_iterator->result.source == edge_iterator->result.target)
{
edge_iterator->result.source = SPECIAL_NODEID;
edge_iterator->result.target = SPECIAL_NODEID;
++edge_iterator;
continue;
}
BOOST_ASSERT(edge_iterator->result.source == node_iterator->node_id);
// assign new node id
auto id_iter = external_to_internal_node_id_map.find(node_iterator->node_id);
BOOST_ASSERT(id_iter != external_to_internal_node_id_map.end());
edge_iterator->result.source = id_iter->second;
edge_iterator->source_coordinate.lat = node_iterator->lat;
edge_iterator->source_coordinate.lon = node_iterator->lon;
++edge_iterator;
}
TIMER_STOP(set_start_coords);
std::cout << "ok, after " << TIMER_SEC(set_start_coords) << "s" << std::endl;
// Sort Edges by target
std::cout << "[extractor] Sorting edges by target ... " << std::flush;
TIMER_START(sort_edges_by_target);
stxxl::sort(all_edges_list.begin(), all_edges_list.end(), CmpEdgeByTargetID(),
stxxl_memory);
TIMER_STOP(sort_edges_by_target);
std::cout << "ok, after " << TIMER_SEC(sort_edges_by_target) << "s" << std::endl;
// Compute edge weights
std::cout << "[extractor] Computing edge weights ... " << std::flush;
TIMER_START(compute_weights);
node_iterator = all_nodes_list.begin();
edge_iterator = all_edges_list.begin();
while (edge_iterator != all_edges_list.end() && node_iterator != all_nodes_list.end())
{
// skip all invalid edges
if (edge_iterator->result.source == SPECIAL_NODEID)
{
++edge_iterator;
continue;
}
if (edge_iterator->result.target < node_iterator->node_id)
{
edge_iterator->result.target = SPECIAL_NODEID;
++edge_iterator;
continue;
}
if (edge_iterator->result.target > node_iterator->node_id)
{
++node_iterator;
continue;
}
BOOST_ASSERT(edge_iterator->result.target == node_iterator->node_id);
BOOST_ASSERT(edge_iterator->weight_data.speed >= 0);
BOOST_ASSERT(edge_iterator->source_coordinate.lat != std::numeric_limits<int>::min());
BOOST_ASSERT(edge_iterator->source_coordinate.lon != std::numeric_limits<int>::min());
const double distance = coordinate_calculation::euclidean_distance(
edge_iterator->source_coordinate.lat, edge_iterator->source_coordinate.lon,
node_iterator->lat, node_iterator->lon);
const double weight = [distance](const InternalExtractorEdge::WeightData& data) {
switch (data.type)
{
case InternalExtractorEdge::WeightType::EDGE_DURATION:
case InternalExtractorEdge::WeightType::WAY_DURATION:
return data.duration * 10.;
break;
case InternalExtractorEdge::WeightType::SPEED:
return (distance * 10.) / (data.speed / 3.6);
break;
case InternalExtractorEdge::WeightType::INVALID:
throw osrm::exception("invalid weight type");
}
return -1.0;
}(edge_iterator->weight_data);
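// Worked example for the formula above: a 100 m segment tagged with a speed of
// 36 km/h gives 36 / 3.6 = 10 m/s, so (100 * 10) / 10 = 100, i.e. the weight is
// stored in tenths of a second (10 s here) before the rounding below.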
auto& edge = edge_iterator->result;
edge.weight = std::max(1, static_cast<int>(std::floor(weight + .5)));
// assign new node id
auto id_iter = external_to_internal_node_id_map.find(node_iterator->node_id);
BOOST_ASSERT(id_iter != external_to_internal_node_id_map.end());
edge.target = id_iter->second;
// orient edges consistently: source id < target id
// important for multi-edge removal
if (edge.source > edge.target)
{
std::swap(edge.source, edge.target);
// std::swap does not work with bit-fields
bool temp = edge.forward;
edge.forward = edge.backward;
edge.backward = temp;
}
++edge_iterator;
}
TIMER_STOP(compute_weights);
std::cout << "ok, after " << TIMER_SEC(compute_weights) << "s" << std::endl;
// Sort edges by start.
std::cout << "[extractor] Sorting edges by renumbered start ... " << std::flush;
TIMER_START(sort_edges_by_renumbered_start);
stxxl::sort(all_edges_list.begin(), all_edges_list.end(), CmpEdgeByStartThenTargetID(), stxxl_memory);
TIMER_STOP(sort_edges_by_renumbered_start);
std::cout << "ok, after " << TIMER_SEC(sort_edges_by_renumbered_start) << "s" << std::endl;
BOOST_ASSERT(all_edges_list.size() > 0);
for (unsigned i = 0; i < all_edges_list.size();)
{
// only invalid edges left
if (all_edges_list[i].result.source == SPECIAL_NODEID)
{
break;
}
// skip invalid edges
if (all_edges_list[i].result.target == SPECIAL_NODEID)
{
// advance the outer loop counter so we do not stall on skipped entries
++i;
continue;
}
unsigned start_idx = i;
NodeID source = all_edges_list[i].result.source;
NodeID target = all_edges_list[i].result.target;
int min_forward_weight = std::numeric_limits<int>::max();
int min_backward_weight = std::numeric_limits<int>::max();
unsigned min_forward_idx = std::numeric_limits<unsigned>::max();
unsigned min_backward_idx = std::numeric_limits<unsigned>::max();
// find minimal edge in both directions
while (all_edges_list[i].result.source == source &&
all_edges_list[i].result.target == target)
{
if (all_edges_list[i].result.forward && all_edges_list[i].result.weight < min_forward_weight)
{
// track the weight as well, otherwise the minimum is never narrowed down
min_forward_weight = all_edges_list[i].result.weight;
min_forward_idx = i;
}
if (all_edges_list[i].result.backward && all_edges_list[i].result.weight < min_backward_weight)
{
min_backward_weight = all_edges_list[i].result.weight;
min_backward_idx = i;
}
// this also increments the outer loop counter!
i++;
}
BOOST_ASSERT(min_forward_idx == std::numeric_limits<unsigned>::max() || min_forward_idx < i);
BOOST_ASSERT(min_backward_idx == std::numeric_limits<unsigned>::max() || min_backward_idx < i);
BOOST_ASSERT(min_backward_idx != std::numeric_limits<unsigned>::max() ||
min_forward_idx != std::numeric_limits<unsigned>::max());
if (min_backward_idx == min_forward_idx)
{
all_edges_list[min_forward_idx].result.is_split = false;
all_edges_list[min_forward_idx].result.forward = true;
all_edges_list[min_forward_idx].result.backward = true;
}
else
{
bool has_forward = min_forward_idx != std::numeric_limits<unsigned>::max();
bool has_backward = min_backward_idx != std::numeric_limits<unsigned>::max();
if (has_forward)
{
all_edges_list[min_forward_idx].result.forward = true;
all_edges_list[min_forward_idx].result.backward = false;
all_edges_list[min_forward_idx].result.is_split = has_backward;
}
if (has_backward)
{
std::swap(all_edges_list[min_backward_idx].result.source,
all_edges_list[min_backward_idx].result.target);
all_edges_list[min_backward_idx].result.forward = true;
all_edges_list[min_backward_idx].result.backward = false;
all_edges_list[min_backward_idx].result.is_split = has_forward;
}
}
// invalidate all unused edges
for (unsigned j = start_idx; j < i; j++)
{
if (j == min_forward_idx || j == min_backward_idx)
{
continue;
}
all_edges_list[j].result.source = SPECIAL_NODEID;
all_edges_list[j].result.target = SPECIAL_NODEID;
}
}
}
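// The loop above keeps, per (source, target) pair, only the cheapest forward
// and the cheapest backward edge and invalidates the rest. A minimal sketch of
// the same reduction on a plain std::vector; Edge and its fields are made-up
// illustration types, not the extractor's own:
#include <cstddef>
#include <cstdint>
#include <limits>
#include <vector>
struct Edge
{
    std::uint32_t source, target;
    int weight;
    bool forward, backward, valid = true;
};
// assumes the input is sorted by (source, target)
void keep_cheapest_per_direction(std::vector<Edge> &edges)
{
    for (std::size_t i = 0; i < edges.size();)
    {
        const std::size_t begin = i;
        std::size_t best_fwd = edges.size(), best_bwd = edges.size();
        int best_fwd_weight = std::numeric_limits<int>::max();
        int best_bwd_weight = std::numeric_limits<int>::max();
        // scan the group of parallel edges sharing (source, target)
        while (i < edges.size() && edges[i].source == edges[begin].source &&
               edges[i].target == edges[begin].target)
        {
            if (edges[i].forward && edges[i].weight < best_fwd_weight)
            {
                best_fwd_weight = edges[i].weight;
                best_fwd = i;
            }
            if (edges[i].backward && edges[i].weight < best_bwd_weight)
            {
                best_bwd_weight = edges[i].weight;
                best_bwd = i;
            }
            ++i;
        }
        // everything except the two winners is dropped
        for (std::size_t j = begin; j < i; ++j)
        {
            edges[j].valid = (j == best_fwd || j == best_bwd);
        }
    }
}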
void ExtractionContainers::WriteEdges(std::ofstream& file_out_stream) const
{
std::cout << "[extractor] Writing used egdes ... " << std::flush;
TIMER_START(write_edges);
// Traverse the edge list and write out all edges that are still valid
unsigned number_of_used_edges = 0;
auto start_position = file_out_stream.tellp();
file_out_stream.write((char *)&number_of_used_edges, sizeof(unsigned));
for (const auto& edge : all_edges_list)
{
if (edge.result.source == SPECIAL_NODEID || edge.result.target == SPECIAL_NODEID)
{
continue;
}
file_out_stream.write((char*) &edge.result, sizeof(NodeBasedEdge));
number_of_used_edges++;
}
TIMER_STOP(write_edges);
std::cout << "ok, after " << TIMER_SEC(write_edges) << "s" << std::endl;
std::cout << "[extractor] setting number of edges ... " << std::flush;
file_out_stream.seekp(start_position);
file_out_stream.write((char *)&number_of_used_edges, sizeof(unsigned));
std::cout << "ok" << std::endl;
SimpleLogger().Write() << "Processed " << number_of_used_edges << " edges";
}
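// WriteEdges (and WriteRestrictions below) use the same pattern: write a
// placeholder count, stream the records, then seek back and patch the count
// once it is known. A minimal sketch with a made-up record type:
#include <fstream>
#include <vector>
void write_with_patched_count(std::ofstream &out, const std::vector<int> &records)
{
    unsigned count = 0;
    const auto count_position = out.tellp();                            // remember where the count lives
    out.write(reinterpret_cast<const char *>(&count), sizeof(count));   // placeholder
    for (const int value : records)
    {
        out.write(reinterpret_cast<const char *>(&value), sizeof(value));
        ++count;
    }
    const auto end_position = out.tellp();
    out.seekp(count_position);                                          // go back and patch the real count
    out.write(reinterpret_cast<const char *>(&count), sizeof(count));
    out.seekp(end_position);                                            // restore the write position
}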
void ExtractionContainers::WriteNodes(std::ofstream& file_out_stream) const
{
unsigned number_of_used_nodes = 0;
// write dummy value, will be overwritten later
file_out_stream.write((char *)&number_of_used_nodes, sizeof(unsigned));
std::cout << "[extractor] Confirming/Writing used nodes ... " << std::flush;
TIMER_START(write_nodes);
// identify all used nodes by a merging step of two sorted lists
auto node_iterator = all_nodes_list.begin();
auto node_id_iterator = used_node_id_list.begin();
while (node_id_iterator != used_node_id_list.end() && node_iterator != all_nodes_list.end())
{
if (*node_id_iterator < node_iterator->node_id)
{
++node_id_iterator;
continue;
}
if (*node_id_iterator > node_iterator->node_id)
{
++node_iterator;
continue;
}
BOOST_ASSERT(*node_id_iterator == node_iterator->node_id);
file_out_stream.write((char *)&(*node_iterator), sizeof(ExternalMemoryNode));
++number_of_used_nodes;
++node_id_iterator;
++node_iterator;
}
TIMER_STOP(write_nodes);
std::cout << "ok, after " << TIMER_SEC(write_nodes) << "s" << std::endl;
std::cout << "[extractor] setting number of nodes ... " << std::flush;
std::ios::pos_type previous_file_position = file_out_stream.tellp();
file_out_stream.seekp(std::ios::beg + sizeof(FingerPrint));
file_out_stream.write((char *)&number_of_used_nodes, sizeof(unsigned));
file_out_stream.seekp(previous_file_position);
std::cout << "ok" << std::endl;
SimpleLogger().Write() << "Processed " << number_of_used_nodes << " nodes";
}
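// The node-writing loop above is an intersection of two sorted sequences:
// used_node_id_list (sorted ids) and all_nodes_list (nodes sorted by id).
// A minimal sketch of the same merge step on std::vector, with a made-up
// Node type:
#include <cstdint>
#include <vector>
struct Node { std::uint64_t id; double lon, lat; };
// both inputs must be sorted by id
std::vector<Node> intersect_sorted(const std::vector<std::uint64_t> &used_ids,
                                   const std::vector<Node> &nodes)
{
    std::vector<Node> result;
    auto id_iter = used_ids.begin();
    auto node_iter = nodes.begin();
    while (id_iter != used_ids.end() && node_iter != nodes.end())
    {
        if (*id_iter < node_iter->id) { ++id_iter; continue; }   // id without node data
        if (*id_iter > node_iter->id) { ++node_iter; continue; } // node is never used
        result.push_back(*node_iter);                            // match: keep it
        ++id_iter;
        ++node_iter;
    }
    return result;
}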
void ExtractionContainers::WriteRestrictions(const std::string& path) const
{
// serialize restrictions
std::ofstream restrictions_out_stream;
unsigned written_restriction_count = 0;
restrictions_out_stream.open(path.c_str(), std::ios::binary);
const FingerPrint fingerprint = FingerPrint::GetValid();
restrictions_out_stream.write((char *)&fingerprint, sizeof(FingerPrint));
const auto count_position = restrictions_out_stream.tellp();
restrictions_out_stream.write((char *)&written_restriction_count, sizeof(unsigned));
for (const auto &restriction_container : restrictions_list)
{
if (SPECIAL_NODEID != restriction_container.restriction.from.node &&
SPECIAL_NODEID != restriction_container.restriction.via.node &&
SPECIAL_NODEID != restriction_container.restriction.to.node)
{
restrictions_out_stream.write((char *)&(restriction_container.restriction),
sizeof(TurnRestriction));
++written_restriction_count;
}
}
restrictions_out_stream.seekp(count_position);
restrictions_out_stream.write((char *)&written_restriction_count, sizeof(unsigned));
restrictions_out_stream.close();
SimpleLogger().Write() << "usable restrictions: " << written_restriction_count;
}
void ExtractionContainers::PrepareRestrictions()
{
std::cout << "[extractor] Sorting used ways ... " << std::flush;
TIMER_START(sort_ways);
stxxl::sort(way_start_end_id_list.begin(), way_start_end_id_list.end(),
FirstAndLastSegmentOfWayStxxlCompare(), stxxl_memory);
TIMER_STOP(sort_ways);
std::cout << "ok, after " << TIMER_SEC(sort_ways) << "s" << std::endl;
std::cout << "[extractor] Sorting " << restrictions_list.size()
<< " restriction. by from... " << std::flush;
TIMER_START(sort_restrictions);
stxxl::sort(restrictions_list.begin(), restrictions_list.end(),
CmpRestrictionContainerByFrom(), stxxl_memory);
TIMER_STOP(sort_restrictions);
std::cout << "ok, after " << TIMER_SEC(sort_restrictions) << "s" << std::endl;
std::cout << "[extractor] Fixing restriction starts ... " << std::flush;
TIMER_START(fix_restriction_starts);
auto restrictions_iterator = restrictions_list.begin();
auto way_start_and_end_iterator = way_start_end_id_list.cbegin();
while (way_start_and_end_iterator != way_start_end_id_list.cend() &&
restrictions_iterator != restrictions_list.end())
{
if (way_start_and_end_iterator->way_id < restrictions_iterator->restriction.from.way)
{
++way_start_and_end_iterator;
continue;
}
if (way_start_and_end_iterator->way_id > restrictions_iterator->restriction.from.way)
{
SimpleLogger().Write(LogLevel::logDEBUG) << "Restriction references invalid way: " << restrictions_iterator->restriction.from.way;
restrictions_iterator->restriction.from.node = SPECIAL_NODEID;
++restrictions_iterator;
continue;
}
BOOST_ASSERT(way_start_and_end_iterator->way_id ==
restrictions_iterator->restriction.from.way);
// we do not remap the via id yet, since we will need it for the to node as well
const NodeID via_node_id = restrictions_iterator->restriction.via.node;
// check if via is actually valid, if not invalidate
auto via_id_iter = external_to_internal_node_id_map.find(via_node_id);
if(via_id_iter == external_to_internal_node_id_map.end())
{
SimpleLogger().Write(LogLevel::logDEBUG) << "Restriction references invalid node: " << restrictions_iterator->restriction.via.node;
restrictions_iterator->restriction.via.node = SPECIAL_NODEID;
++restrictions_iterator;
continue;
}
if (way_start_and_end_iterator->first_segment_source_id == via_node_id)
{
// assign new from node id
auto id_iter = external_to_internal_node_id_map.find(
way_start_and_end_iterator->first_segment_target_id);
BOOST_ASSERT(id_iter != external_to_internal_node_id_map.end());
restrictions_iterator->restriction.from.node = id_iter->second;
}
else if (way_start_and_end_iterator->last_segment_target_id == via_node_id)
{
// assign new from node id
auto id_iter = external_to_internal_node_id_map.find(
way_start_and_end_iterator->last_segment_source_id);
BOOST_ASSERT(id_iter != external_to_internal_node_id_map.end());
restrictions_iterator->restriction.from.node = id_iter->second;
}
++restrictions_iterator;
}
TIMER_STOP(fix_restriction_starts);
std::cout << "ok, after " << TIMER_SEC(fix_restriction_starts) << "s" << std::endl;
std::cout << "[extractor] Sorting restrictions. by to ... " << std::flush;
TIMER_START(sort_restrictions_to);
stxxl::sort(restrictions_list.begin(), restrictions_list.end(),
CmpRestrictionContainerByTo(), stxxl_memory);
TIMER_STOP(sort_restrictions_to);
std::cout << "ok, after " << TIMER_SEC(sort_restrictions_to) << "s" << std::endl;
std::cout << "[extractor] Fixing restriction ends ... " << std::flush;
TIMER_START(fix_restriction_ends);
restrictions_iterator = restrictions_list.begin();
way_start_and_end_iterator = way_start_end_id_list.cbegin();
while (way_start_and_end_iterator != way_start_end_id_list.cend() &&
restrictions_iterator != restrictions_list.end())
{
if (way_start_and_end_iterator->way_id < restrictions_iterator->restriction.to.way)
{
++way_start_and_end_iterator;
continue;
}
if (restrictions_iterator->restriction.from.node == SPECIAL_NODEID ||
restrictions_iterator->restriction.via.node == SPECIAL_NODEID)
{
++restrictions_iterator;
continue;
}
if (way_start_and_end_iterator->way_id > restrictions_iterator->restriction.to.way)
{
SimpleLogger().Write(LogLevel::logDEBUG) << "Restriction references invalid way: " << restrictions_iterator->restriction.to.way;
restrictions_iterator->restriction.to.node = SPECIAL_NODEID;
++restrictions_iterator;
continue;
}
BOOST_ASSERT(way_start_and_end_iterator->way_id ==
restrictions_iterator->restriction.to.way);
const NodeID via_node_id = restrictions_iterator->restriction.via.node;
// assign new via node id
auto via_id_iter = external_to_internal_node_id_map.find(via_node_id);
BOOST_ASSERT(via_id_iter != external_to_internal_node_id_map.end());
restrictions_iterator->restriction.via.node = via_id_iter->second;
if (way_start_and_end_iterator->first_segment_source_id == via_node_id)
{
auto to_id_iter = external_to_internal_node_id_map.find(
way_start_and_end_iterator->first_segment_target_id);
BOOST_ASSERT(to_id_iter != external_to_internal_node_id_map.end());
restrictions_iterator->restriction.to.node = to_id_iter->second;
}
else if (way_start_and_end_iterator->last_segment_target_id == via_node_id)
{
auto to_id_iter = external_to_internal_node_id_map.find(
way_start_and_end_iterator->last_segment_source_id);
BOOST_ASSERT(to_id_iter != external_to_internal_node_id_map.end());
restrictions_iterator->restriction.to.node = to_id_iter->second;
}
++restrictions_iterator;
}
TIMER_STOP(fix_restriction_ends);
std::cout << "ok, after " << TIMER_SEC(fix_restriction_ends) << "s" << std::endl;
}
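// Both restriction passes above rely on external_to_internal_node_id_map to
// translate external OSM node ids into the renumbered internal ids, and they
// invalidate any restriction whose node is unknown. A minimal sketch of that
// lookup pattern; the type aliases and the INVALID_ID constant are
// illustrative only:
#include <cstdint>
#include <limits>
#include <unordered_map>
using ExternalNodeID = std::uint64_t;
using InternalNodeID = std::uint32_t;
const InternalNodeID INVALID_ID = std::numeric_limits<InternalNodeID>::max();
InternalNodeID remap_or_invalidate(const std::unordered_map<ExternalNodeID, InternalNodeID> &map,
                                   const ExternalNodeID external_id)
{
    const auto iter = map.find(external_id);
    if (iter == map.end())
    {
        return INVALID_ID; // unknown node: the referencing restriction gets dropped
    }
    return iter->second;
}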

View File

@ -32,13 +32,15 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "first_and_last_segment_of_way.hpp" #include "first_and_last_segment_of_way.hpp"
#include "../data_structures/external_memory_node.hpp" #include "../data_structures/external_memory_node.hpp"
#include "../data_structures/restriction.hpp" #include "../data_structures/restriction.hpp"
#include "../util/fingerprint.hpp"
#include <stxxl/vector> #include <stxxl/vector>
#include <unordered_map>
/** /**
* Uses external memory containers from stxxl to store all the data that * Uses external memory containers from stxxl to store all the data that
* is collected by the extractor callbacks. * is collected by the extractor callbacks.
*
* The data is then filtered, aggregated and finally written to disk.
*/ */
class ExtractionContainers class ExtractionContainers
{ {
@ -49,6 +51,14 @@ class ExtractionContainers
#else #else
const static unsigned stxxl_memory = ((sizeof(std::size_t) == 4) ? INT_MAX : UINT_MAX); const static unsigned stxxl_memory = ((sizeof(std::size_t) == 4) ? INT_MAX : UINT_MAX);
#endif #endif
void PrepareNodes();
void PrepareRestrictions();
void PrepareEdges();
void WriteNodes(std::ofstream& file_out_stream) const;
void WriteRestrictions(const std::string& restrictions_file_name) const;
void WriteEdges(std::ofstream& file_out_stream) const;
void WriteNames(const std::string& names_file_name) const;
public: public:
using STXXLNodeIDVector = stxxl::vector<NodeID>; using STXXLNodeIDVector = stxxl::vector<NodeID>;
using STXXLNodeVector = stxxl::vector<ExternalMemoryNode>; using STXXLNodeVector = stxxl::vector<ExternalMemoryNode>;
@ -63,14 +73,15 @@ class ExtractionContainers
STXXLStringVector name_list; STXXLStringVector name_list;
STXXLRestrictionsVector restrictions_list; STXXLRestrictionsVector restrictions_list;
STXXLWayIDStartEndVector way_start_end_id_list; STXXLWayIDStartEndVector way_start_end_id_list;
const FingerPrint fingerprint; std::unordered_map<NodeID, NodeID> external_to_internal_node_id_map;
ExtractionContainers(); ExtractionContainers();
~ExtractionContainers(); ~ExtractionContainers();
void PrepareData(const std::string &output_file_name, void PrepareData(const std::string &output_file_name,
const std::string &restrictions_file_name); const std::string &restrictions_file_name,
const std::string &names_file_name);
}; };
#endif /* EXTRACTION_CONTAINERS_HPP */ #endif /* EXTRACTION_CONTAINERS_HPP */
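// The members above are stxxl external-memory vectors, and the Prepare* steps
// sort them with stxxl::sort, which needs a comparator that also provides
// min_value()/max_value() sentinels (as the CmpEdgeBy* functors do). A minimal
// sketch of that pattern with a plain integer payload; the memory budget is an
// illustrative value, not the extractor's stxxl_memory constant:
#include <climits>
#include <stxxl/sort>
#include <stxxl/vector>
struct CmpInt
{
    using value_type = int;
    bool operator()(const int lhs, const int rhs) const { return lhs < rhs; }
    value_type min_value() { return INT_MIN; }
    value_type max_value() { return INT_MAX; }
};
void sort_example()
{
    stxxl::vector<int> values; // backed by external memory
    values.push_back(3);
    values.push_back(1);
    values.push_back(2);
    stxxl::sort(values.begin(), values.end(), CmpInt(), 1024 * 1024 * 1024);
}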

View File

@ -102,7 +102,7 @@ unsigned parseDuration(const std::string &s)
minutes = cast::string_to_int(result[1]); minutes = cast::string_to_int(result[1]);
hours = cast::string_to_int(result[0]); hours = cast::string_to_int(result[0]);
} }
return 10 * (3600 * hours + 60 * minutes + seconds); return (3600 * hours + 60 * minutes + seconds);
} }
} }
else if (iso_8601_duration_is_valid(s)) else if (iso_8601_duration_is_valid(s))
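// Worked example of the hh:mm branch above, which now returns plain seconds
// (the former 10x pre-scaling is applied later, when the WAY_DURATION weight
// is computed in extraction_containers.cpp): "1:30" gives hours = 1,
// minutes = 30, seconds = 0.
#include <cassert>
int main()
{
    const unsigned hours = 1, minutes = 30, seconds = 0; // as parsed from "1:30"
    assert(3600 * hours + 60 * minutes + seconds == 5400);
    return 0;
}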

View File

@ -82,7 +82,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* .restrictions : Turn restrictions that are used by osrm-prepare to construct the edge-expanded graph * .restrictions : Turn restrictions that are used by osrm-prepare to construct the edge-expanded graph
* *
*/ */
int extractor::run(const ExtractorConfig &extractor_config) int extractor::run()
{ {
try try
{ {
@ -91,20 +91,20 @@ int extractor::run(const ExtractorConfig &extractor_config)
const unsigned recommended_num_threads = tbb::task_scheduler_init::default_num_threads(); const unsigned recommended_num_threads = tbb::task_scheduler_init::default_num_threads();
const auto number_of_threads = const auto number_of_threads =
std::min(recommended_num_threads, extractor_config.requested_num_threads); std::min(recommended_num_threads, config.requested_num_threads);
tbb::task_scheduler_init init(number_of_threads); tbb::task_scheduler_init init(number_of_threads);
SimpleLogger().Write() << "Input file: " << extractor_config.input_path.filename().string(); SimpleLogger().Write() << "Input file: " << config.input_path.filename().string();
SimpleLogger().Write() << "Profile: " << extractor_config.profile_path.filename().string(); SimpleLogger().Write() << "Profile: " << config.profile_path.filename().string();
SimpleLogger().Write() << "Threads: " << number_of_threads; SimpleLogger().Write() << "Threads: " << number_of_threads;
// setup scripting environment // setup scripting environment
ScriptingEnvironment scripting_environment(extractor_config.profile_path.string().c_str()); ScriptingEnvironment scripting_environment(config.profile_path.string().c_str());
ExtractionContainers extraction_containers; ExtractionContainers extraction_containers;
auto extractor_callbacks = osrm::make_unique<ExtractorCallbacks>(extraction_containers); auto extractor_callbacks = osrm::make_unique<ExtractorCallbacks>(extraction_containers);
const osmium::io::File input_file(extractor_config.input_path.string()); const osmium::io::File input_file(config.input_path.string());
osmium::io::Reader reader(input_file); osmium::io::Reader reader(input_file);
const osmium::io::Header header = reader.header(); const osmium::io::Header header = reader.header();
@ -131,7 +131,7 @@ int extractor::run(const ExtractorConfig &extractor_config)
} }
SimpleLogger().Write() << "timestamp: " << timestamp; SimpleLogger().Write() << "timestamp: " << timestamp;
boost::filesystem::ofstream timestamp_out(extractor_config.timestamp_file_name); boost::filesystem::ofstream timestamp_out(config.timestamp_file_name);
timestamp_out.write(timestamp.c_str(), timestamp.length()); timestamp_out.write(timestamp.c_str(), timestamp.length());
timestamp_out.close(); timestamp_out.close();
@ -236,12 +236,13 @@ int extractor::run(const ExtractorConfig &extractor_config)
return 1; return 1;
} }
extraction_containers.PrepareData(extractor_config.output_file_name, extraction_containers.PrepareData(config.output_file_name,
extractor_config.restriction_file_name); config.restriction_file_name,
config.names_file_name);
TIMER_STOP(extracting); TIMER_STOP(extracting);
SimpleLogger().Write() << "extraction finished after " << TIMER_SEC(extracting) << "s"; SimpleLogger().Write() << "extraction finished after " << TIMER_SEC(extracting) << "s";
SimpleLogger().Write() << "To prepare the data for routing, run: " SimpleLogger().Write() << "To prepare the data for routing, run: "
<< "./osrm-prepare " << extractor_config.output_file_name << "./osrm-prepare " << config.output_file_name
<< std::endl; << std::endl;
} }
catch (std::exception &e) catch (std::exception &e)

View File

@ -30,8 +30,13 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "extractor_options.hpp" #include "extractor_options.hpp"
struct extractor class extractor
{ {
int run(const ExtractorConfig &extractor_config); public:
extractor(const ExtractorConfig &extractor_config)
: config(extractor_config) {}
int run();
private:
ExtractorConfig config;
}; };
#endif /* EXTRACTOR_HPP */ #endif /* EXTRACTOR_HPP */

View File

@ -108,17 +108,40 @@ void ExtractorCallbacks::ProcessWay(const osmium::Way &input_way, const Extracti
<< " of size " << input_way.nodes().size(); << " of size " << input_way.nodes().size();
return; return;
} }
InternalExtractorEdge::WeightData forward_weight_data;
InternalExtractorEdge::WeightData backward_weight_data;
if (0 < parsed_way.duration) if (0 < parsed_way.duration)
{ {
// TODO: iterate all way segments and set duration corresponding to the length of each const unsigned num_edges = (input_way.nodes().size() - 1);
// segment // FIXME We divide by the number of nodes here, but should rather consider
const_cast<ExtractionWay &>(parsed_way).forward_speed = // the length of each segment. We would either have to compute the length
parsed_way.duration / (input_way.nodes().size() - 1); // of the whole way here (we can't: no node coordinates) or push that back
const_cast<ExtractionWay &>(parsed_way).backward_speed = // to the container and keep a reference to the way.
parsed_way.duration / (input_way.nodes().size() - 1); forward_weight_data.duration = parsed_way.duration / num_edges;
forward_weight_data.type = InternalExtractorEdge::WeightType::WAY_DURATION;
backward_weight_data.duration = parsed_way.duration / num_edges;
backward_weight_data.type = InternalExtractorEdge::WeightType::WAY_DURATION;
}
else
{
if (parsed_way.forward_speed > 0 &&
parsed_way.forward_travel_mode != TRAVEL_MODE_INACCESSIBLE)
{
forward_weight_data.speed = parsed_way.forward_speed;
forward_weight_data.type = InternalExtractorEdge::WeightType::SPEED;
}
if (parsed_way.backward_speed > 0 &&
parsed_way.backward_travel_mode != TRAVEL_MODE_INACCESSIBLE)
{
backward_weight_data.speed = parsed_way.backward_speed;
backward_weight_data.type = InternalExtractorEdge::WeightType::SPEED;
}
} }
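// Worked example of the duration split above: a way whose duration tag parses
// to 120 s and which has 5 nodes gets num_edges = 4 segments, so both
// forward_weight_data.duration and backward_weight_data.duration become
// 120 / 4 = 30 s per segment. As the FIXME notes, this is a simplification:
// segments of different length all receive the same share.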
if (std::numeric_limits<double>::epsilon() >= std::abs(-1. - parsed_way.forward_speed)) if (forward_weight_data.type == InternalExtractorEdge::WeightType::INVALID
&& backward_weight_data.type == InternalExtractorEdge::WeightType::INVALID)
{ {
SimpleLogger().Write(logDEBUG) << "found way with bogus speed, id: " << input_way.id(); SimpleLogger().Write(logDEBUG) << "found way with bogus speed, id: " << input_way.id();
return; return;
@ -144,36 +167,34 @@ void ExtractorCallbacks::ProcessWay(const osmium::Way &input_way, const Extracti
((parsed_way.forward_speed != parsed_way.backward_speed) || ((parsed_way.forward_speed != parsed_way.backward_speed) ||
(parsed_way.forward_travel_mode != parsed_way.backward_travel_mode)); (parsed_way.forward_travel_mode != parsed_way.backward_travel_mode));
auto pair_wise_segment_split = // lambda to add edge to container
auto pair_wise_segment_split_forward =
[&](const osmium::NodeRef &first_node, const osmium::NodeRef &last_node) [&](const osmium::NodeRef &first_node, const osmium::NodeRef &last_node)
{ {
// SimpleLogger().Write() << "adding edge (" << first_node.ref() << "," << const bool forward_only = split_edge || TRAVEL_MODE_INACCESSIBLE == parsed_way.backward_travel_mode;
// last_node.ref() << "), fwd speed: " << parsed_way.forward_speed;
external_memory.all_edges_list.push_back(InternalExtractorEdge( external_memory.all_edges_list.push_back(InternalExtractorEdge(
first_node.ref(), last_node.ref(), first_node.ref(), last_node.ref(), name_id, forward_weight_data,
((split_edge || TRAVEL_MODE_INACCESSIBLE == parsed_way.backward_travel_mode) true, !forward_only, parsed_way.roundabout, parsed_way.ignore_in_grid,
? ExtractionWay::oneway parsed_way.is_access_restricted, parsed_way.forward_travel_mode, split_edge));
: ExtractionWay::bidirectional),
parsed_way.forward_speed, name_id, parsed_way.roundabout, parsed_way.ignore_in_grid,
(0 < parsed_way.duration), parsed_way.is_access_restricted,
parsed_way.forward_travel_mode, split_edge));
external_memory.used_node_id_list.push_back(first_node.ref()); external_memory.used_node_id_list.push_back(first_node.ref());
}; };
const bool is_opposite_way = TRAVEL_MODE_INACCESSIBLE == parsed_way.forward_travel_mode; const bool is_opposite_way = TRAVEL_MODE_INACCESSIBLE == parsed_way.forward_travel_mode;
// traverse way in reverse in this case
if (is_opposite_way) if (is_opposite_way)
{ {
// why don't we have to swap the parsed_way.forward/backward speed here as well?
const_cast<ExtractionWay &>(parsed_way).forward_travel_mode = const_cast<ExtractionWay &>(parsed_way).forward_travel_mode =
parsed_way.backward_travel_mode; parsed_way.backward_travel_mode;
const_cast<ExtractionWay &>(parsed_way).backward_travel_mode = TRAVEL_MODE_INACCESSIBLE; const_cast<ExtractionWay &>(parsed_way).backward_travel_mode = TRAVEL_MODE_INACCESSIBLE;
osrm::for_each_pair(input_way.nodes().crbegin(), input_way.nodes().crend(), osrm::for_each_pair(input_way.nodes().crbegin(), input_way.nodes().crend(),
pair_wise_segment_split); pair_wise_segment_split_forward);
external_memory.used_node_id_list.push_back(input_way.nodes().front().ref()); external_memory.used_node_id_list.push_back(input_way.nodes().front().ref());
} }
else else
{ {
osrm::for_each_pair(input_way.nodes().cbegin(), input_way.nodes().cend(), osrm::for_each_pair(input_way.nodes().cbegin(), input_way.nodes().cend(),
pair_wise_segment_split); pair_wise_segment_split_forward);
external_memory.used_node_id_list.push_back(input_way.nodes().back().ref()); external_memory.used_node_id_list.push_back(input_way.nodes().back().ref());
} }
@ -187,22 +208,19 @@ void ExtractorCallbacks::ProcessWay(const osmium::Way &input_way, const Extracti
if (split_edge) if (split_edge)
{ // Only true if the way should be split { // Only true if the way should be split
BOOST_ASSERT(parsed_way.backward_travel_mode > 0); BOOST_ASSERT(parsed_way.backward_travel_mode != TRAVEL_MODE_INACCESSIBLE);
auto pair_wise_segment_split_2 = auto pair_wise_segment_split_2 =
[&](const osmium::NodeRef &first_node, const osmium::NodeRef &last_node) [&](const osmium::NodeRef &first_node, const osmium::NodeRef &last_node)
{ {
// SimpleLogger().Write() << "adding edge (" << last_node.ref() << "," <<
// first_node.ref() << "), bwd speed: " << parsed_way.backward_speed;
external_memory.all_edges_list.push_back(InternalExtractorEdge( external_memory.all_edges_list.push_back(InternalExtractorEdge(
last_node.ref(), first_node.ref(), ExtractionWay::oneway, parsed_way.backward_speed, last_node.ref(), first_node.ref(), name_id, backward_weight_data,
name_id, parsed_way.roundabout, parsed_way.ignore_in_grid, true, false, parsed_way.roundabout, parsed_way.ignore_in_grid,
(0 < parsed_way.duration), parsed_way.is_access_restricted, parsed_way.is_access_restricted, parsed_way.backward_travel_mode, split_edge));
parsed_way.backward_travel_mode, split_edge)); external_memory.used_node_id_list.push_back(last_node.ref());
}; };
if (is_opposite_way) if (is_opposite_way)
{ {
// SimpleLogger().Write() << "opposite2";
osrm::for_each_pair(input_way.nodes().crbegin(), input_way.nodes().crend(), osrm::for_each_pair(input_way.nodes().crbegin(), input_way.nodes().crend(),
pair_wise_segment_split_2); pair_wise_segment_split_2);
external_memory.used_node_id_list.push_back(input_way.nodes().front().ref()); external_memory.used_node_id_list.push_back(input_way.nodes().front().ref());
@ -211,7 +229,7 @@ void ExtractorCallbacks::ProcessWay(const osmium::Way &input_way, const Extracti
{ {
osrm::for_each_pair(input_way.nodes().cbegin(), input_way.nodes().cend(), osrm::for_each_pair(input_way.nodes().cbegin(), input_way.nodes().cend(),
pair_wise_segment_split_2); pair_wise_segment_split_2);
external_memory.used_node_id_list.push_back(input_way.nodes().back().ref()); external_memory.used_node_id_list.push_back(input_way.nodes().front().ref());
} }
external_memory.way_start_end_id_list.push_back( external_memory.way_start_end_id_list.push_back(

View File

@ -135,6 +135,7 @@ void ExtractorOptions::GenerateOutputFilesNames(ExtractorConfig &extractor_confi
boost::filesystem::path &input_path = extractor_config.input_path; boost::filesystem::path &input_path = extractor_config.input_path;
extractor_config.output_file_name = input_path.string(); extractor_config.output_file_name = input_path.string();
extractor_config.restriction_file_name = input_path.string(); extractor_config.restriction_file_name = input_path.string();
extractor_config.names_file_name = input_path.string();
extractor_config.timestamp_file_name = input_path.string(); extractor_config.timestamp_file_name = input_path.string();
std::string::size_type pos = extractor_config.output_file_name.find(".osm.bz2"); std::string::size_type pos = extractor_config.output_file_name.find(".osm.bz2");
if (pos == std::string::npos) if (pos == std::string::npos)
@ -156,12 +157,14 @@ void ExtractorOptions::GenerateOutputFilesNames(ExtractorConfig &extractor_confi
{ {
extractor_config.output_file_name.append(".osrm"); extractor_config.output_file_name.append(".osrm");
extractor_config.restriction_file_name.append(".osrm.restrictions"); extractor_config.restriction_file_name.append(".osrm.restrictions");
extractor_config.names_file_name.append(".osrm.names");
extractor_config.timestamp_file_name.append(".osrm.timestamp"); extractor_config.timestamp_file_name.append(".osrm.timestamp");
} }
else else
{ {
extractor_config.output_file_name.replace(pos, 5, ".osrm"); extractor_config.output_file_name.replace(pos, 5, ".osrm");
extractor_config.restriction_file_name.replace(pos, 5, ".osrm.restrictions"); extractor_config.restriction_file_name.replace(pos, 5, ".osrm.restrictions");
extractor_config.names_file_name.replace(pos, 5, ".osrm.names");
extractor_config.timestamp_file_name.replace(pos, 5, ".osrm.timestamp"); extractor_config.timestamp_file_name.replace(pos, 5, ".osrm.timestamp");
} }
} }
@ -169,6 +172,7 @@ void ExtractorOptions::GenerateOutputFilesNames(ExtractorConfig &extractor_confi
{ {
extractor_config.output_file_name.replace(pos, 8, ".osrm"); extractor_config.output_file_name.replace(pos, 8, ".osrm");
extractor_config.restriction_file_name.replace(pos, 8, ".osrm.restrictions"); extractor_config.restriction_file_name.replace(pos, 8, ".osrm.restrictions");
extractor_config.names_file_name.replace(pos, 8, ".osrm.names");
extractor_config.timestamp_file_name.replace(pos, 8, ".osrm.timestamp"); extractor_config.timestamp_file_name.replace(pos, 8, ".osrm.timestamp");
} }
} }

View File

@ -48,6 +48,7 @@ struct ExtractorConfig
std::string output_file_name; std::string output_file_name;
std::string restriction_file_name; std::string restriction_file_name;
std::string names_file_name;
std::string timestamp_file_name; std::string timestamp_file_name;
unsigned requested_num_threads; unsigned requested_num_threads;

View File

@ -30,6 +30,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../typedefs.h" #include "../typedefs.h"
#include "../data_structures/travel_mode.hpp" #include "../data_structures/travel_mode.hpp"
#include "../data_structures/import_edge.hpp"
#include <boost/assert.hpp> #include <boost/assert.hpp>
@ -37,83 +38,108 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
struct InternalExtractorEdge struct InternalExtractorEdge
{ {
InternalExtractorEdge() // specify the type of the weight data
: start(0), target(0), speed(0), name_id(0), direction(0), is_roundabout(false), enum class WeightType : char {
is_in_tiny_cc(false), is_duration_set(false), is_access_restricted(false), INVALID,
is_split(false), travel_mode(TRAVEL_MODE_INACCESSIBLE) SPEED,
EDGE_DURATION,
WAY_DURATION,
};
struct WeightData
{
WeightData() : duration(0.0), type(WeightType::INVALID)
{
}
union
{
double duration;
double speed;
};
WeightType type;
};
explicit InternalExtractorEdge()
: result(0, 0, 0, 0, false, false, false, false, false,
TRAVEL_MODE_INACCESSIBLE, false)
{ {
} }
explicit InternalExtractorEdge(NodeID start, explicit InternalExtractorEdge(NodeID source,
NodeID target, NodeID target,
short direction, NodeID name_id,
double speed, const WeightData& weight_data,
unsigned name_id, bool forward,
bool is_roundabout, bool backward,
bool is_in_tiny_cc, bool roundabout,
bool is_duration_set, bool in_tiny_cc,
bool is_access_restricted, bool access_restricted,
TravelMode travel_mode, TravelMode travel_mode,
bool is_split) bool is_split)
: start(start), target(target), speed(speed), name_id(name_id), direction(direction), : result(source, target, name_id, 0, forward, backward, roundabout,
is_roundabout(is_roundabout), is_in_tiny_cc(is_in_tiny_cc), in_tiny_cc, access_restricted, travel_mode, is_split),
is_duration_set(is_duration_set), is_access_restricted(is_access_restricted), weight_data(weight_data)
is_split(is_split), travel_mode(travel_mode)
{ {
} }
// data that will be written to disk
NodeBasedEdge result;
// intermediate edge weight
WeightData weight_data;
// coordinate of the source node
FixedPointCoordinate source_coordinate;
// necessary static util functions for stxxl's sorting // necessary static util functions for stxxl's sorting
static InternalExtractorEdge min_value() static InternalExtractorEdge min_value()
{ {
return InternalExtractorEdge(0, 0, 0, 0, 0, false, false, false, false, return InternalExtractorEdge(0, 0, 0, WeightData(), false, false, false,
TRAVEL_MODE_INACCESSIBLE, false); false, false, TRAVEL_MODE_INACCESSIBLE, false);
} }
static InternalExtractorEdge max_value() static InternalExtractorEdge max_value()
{ {
return InternalExtractorEdge(SPECIAL_NODEID, SPECIAL_NODEID, 0, 0, 0, false, false, false, return InternalExtractorEdge(SPECIAL_NODEID, SPECIAL_NODEID, 0, WeightData(), false, false,
false, TRAVEL_MODE_INACCESSIBLE, false); false, false, false, TRAVEL_MODE_INACCESSIBLE, false);
}
};
struct CmpEdgeByStartThenTargetID
{
using value_type = InternalExtractorEdge;
bool operator()(const InternalExtractorEdge &lhs, const InternalExtractorEdge &rhs) const
{
return (lhs.result.source < rhs.result.source) ||
((lhs.result.source == rhs.result.source) &&
(lhs.result.target < rhs.result.target));
} }
NodeID start; value_type max_value() { return InternalExtractorEdge::max_value(); }
NodeID target; value_type min_value() { return InternalExtractorEdge::min_value(); }
double speed;
unsigned name_id;
short direction;
bool is_roundabout : 1;
bool is_in_tiny_cc : 1;
bool is_duration_set : 1;
bool is_access_restricted : 1;
bool is_split : 1;
TravelMode travel_mode : 4;
FixedPointCoordinate source_coordinate;
FixedPointCoordinate target_coordinate;
}; };
struct CmpEdgeByStartID struct CmpEdgeByStartID
{ {
using value_type = InternalExtractorEdge; using value_type = InternalExtractorEdge;
bool operator()(const InternalExtractorEdge &a, const InternalExtractorEdge &b) const bool operator()(const InternalExtractorEdge &lhs, const InternalExtractorEdge &rhs) const
{ {
return a.start < b.start; return lhs.result.source < rhs.result.source;
} }
value_type max_value() { return InternalExtractorEdge::max_value(); } value_type max_value() { return InternalExtractorEdge::max_value(); }
value_type min_value() { return InternalExtractorEdge::min_value(); } value_type min_value() { return InternalExtractorEdge::min_value(); }
}; };
struct CmpEdgeByTargetID struct CmpEdgeByTargetID
{ {
using value_type = InternalExtractorEdge; using value_type = InternalExtractorEdge;
bool operator()(const InternalExtractorEdge &lhs, const InternalExtractorEdge &rhs) const
bool operator()(const InternalExtractorEdge &a, const InternalExtractorEdge &b) const
{ {
return a.target < b.target; return lhs.result.target < rhs.result.target;
} }
value_type max_value() { return InternalExtractorEdge::max_value(); } value_type max_value() { return InternalExtractorEdge::max_value(); }
value_type min_value() { return InternalExtractorEdge::min_value(); } value_type min_value() { return InternalExtractorEdge::min_value(); }
}; };
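// WeightData above is a tagged union: `type` records whether the payload is a
// speed (km/h) or a per-segment duration (seconds). A minimal usage sketch
// mirroring how ExtractorCallbacks::ProcessWay fills it; the helper names are
// illustrative only:
InternalExtractorEdge::WeightData make_speed_weight(const double speed_in_kmh)
{
    InternalExtractorEdge::WeightData data;
    data.speed = speed_in_kmh;
    data.type = InternalExtractorEdge::WeightType::SPEED;
    return data;
}
InternalExtractorEdge::WeightData make_way_duration_weight(const double seconds_per_segment)
{
    InternalExtractorEdge::WeightData data;
    data.duration = seconds_per_segment;
    data.type = InternalExtractorEdge::WeightType::WAY_DURATION;
    return data;
}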

View File

@ -44,7 +44,7 @@ namespace
{ {
int lua_error_callback(lua_State *lua_state) int lua_error_callback(lua_State *lua_state)
{ {
luabind::object error_msg(luabind::from_stack(lua_state, -1)); std::string error_msg = lua_tostring(lua_state, -1);
std::ostringstream error_stream; std::ostringstream error_stream;
error_stream << error_msg; error_stream << error_msg;
throw osrm::exception("ERROR occurred in profile script:\n" + error_stream.str()); throw osrm::exception("ERROR occurred in profile script:\n" + error_stream.str());

View File

@ -53,7 +53,7 @@ auto get_value_by_key(T const &object, const char *key) -> decltype(object.get_v
int lua_error_callback(lua_State *L) // This is so I can use my own function as an int lua_error_callback(lua_State *L) // This is so I can use my own function as an
// exception handler, pcall_log() // exception handler, pcall_log()
{ {
luabind::object error_msg(luabind::from_stack(L, -1)); std::string error_msg = lua_tostring(L, -1);
std::ostringstream error_stream; std::ostringstream error_stream;
error_stream << error_msg; error_stream << error_msg;
throw osrm::exception("ERROR occurred in profile script:\n" + error_stream.str()); throw osrm::exception("ERROR occurred in profile script:\n" + error_stream.str());

View File

@ -120,7 +120,7 @@ Feature: Bike - Access tags on ways
| no | | | | | no | | | |
| private | | | | | private | | | |
| agricultural | | | | | agricultural | | | |
| forestery | | | | | forestry | | | |
| | yes | | x | | | yes | | x |
| | permissive | | x | | | permissive | | x |
| | designated | | x | | | designated | | x |
@ -128,7 +128,7 @@ Feature: Bike - Access tags on ways
| | no | | | | | no | | |
| | private | | | | | private | | |
| | agricultural | | | | | agricultural | | |
| | forestery | | | | | forestry | | |
| | | yes | x | | | | yes | x |
| | | permissive | x | | | | permissive | x |
| | | designated | x | | | | designated | x |
@ -136,7 +136,7 @@ Feature: Bike - Access tags on ways
| | | no | | | | | no | |
| | | private | | | | | private | |
| | | agricultural | | | | | agricultural | |
| | | forestery | | | | | forestry | |
Scenario: Bike - Access tags on both node and way Scenario: Bike - Access tags on both node and way
Then routability should be Then routability should be

View File

@ -7,20 +7,21 @@ Feature: Bike - Access tags on nodes
Scenario: Bike - Access tag hierarchy on nodes Scenario: Bike - Access tag hierarchy on nodes
Then routability should be Then routability should be
| node/access | node/vehicle | node/bicycle | bothw | | node/access | node/vehicle | node/bicycle | node/highway | bothw |
| | | | x | | | | | | x |
| yes | | | x | | yes | | | | x |
| no | | | | | no | | | | |
| | yes | | x | | | yes | | | x |
| | no | | | | | no | | | |
| no | yes | | x | | no | yes | | | x |
| yes | no | | | | yes | no | | | |
| | | yes | x | | | | yes | | x |
| | | no | | | | | no | | |
| no | | yes | x | | | | no | crossing | x |
| yes | | no | | | no | | yes | | x |
| | no | yes | x | | yes | | no | | |
| | yes | no | | | | no | yes | | x |
| | yes | no | | |
Scenario: Bike - Overwriting implied access on nodes doesn't overwrite way Scenario: Bike - Overwriting implied access on nodes doesn't overwrite way
Then routability should be Then routability should be
@ -45,7 +46,7 @@ Feature: Bike - Access tags on nodes
| no | | | | | no | | | |
| private | | | | | private | | | |
| agricultural | | | | | agricultural | | | |
| forestery | | | | | forestry | | | |
| | yes | | x | | | yes | | x |
| | permissive | | x | | | permissive | | x |
| | designated | | x | | | designated | | x |
@ -53,7 +54,7 @@ Feature: Bike - Access tags on nodes
| | no | | | | | no | | |
| | private | | | | | private | | |
| | agricultural | | | | | agricultural | | |
| | forestery | | | | | forestry | | |
| | | yes | x | | | | yes | x |
| | | permissive | x | | | | permissive | x |
| | | designated | x | | | | designated | x |
@ -61,4 +62,4 @@ Feature: Bike - Access tags on nodes
| | | no | | | | | no | |
| | | private | | | | | private | |
| | | agricultural | | | | | agricultural | |
| | | forestery | | | | | forestry | |

View File

@ -16,7 +16,7 @@ Feature: Bike - Surfaces
| cycleway | unpaved | 120s | | cycleway | unpaved | 120s |
| cycleway | fine_gravel | 120s | | cycleway | fine_gravel | 120s |
| cycleway | gravel | 120s | | cycleway | gravel | 120s |
| cycleway | pebbelstone | 120s | | cycleway | pebblestone | 120s |
| cycleway | dirt | 120s | | cycleway | dirt | 120s |
| cycleway | earth | 120s | | cycleway | earth | 120s |
| cycleway | grass | 120s | | cycleway | grass | 120s |

View File

@ -93,3 +93,22 @@ OSRM will use 4/5 of the projected free-flow speed.
| primary | 15 | | 30 | 60 | 34 km/h | 59 km/h | | primary | 15 | | 30 | 60 | 34 km/h | 59 km/h |
| primary | 15 | 3 | 30 | 60 | 15 km/h | 30 km/h | | primary | 15 | 3 | 30 | 60 | 15 km/h | 30 km/h |
Scenario: Car - Single lane streets are ignored or incur a penalty
Then routability should be
| highway | maxspeed | lanes | maxspeed:forward | maxspeed:backward | forw | backw |
| primary | | | | | 63 km/h | 63 km/h |
| primary | | 1 | | | 32 km/h | 32 km/h |
| primary | 60 | | | | 59 km/h | 59 km/h |
| primary | 60 | 1 | | | 30 km/h | 30 km/h |
| primary | | | 60 | | 59 km/h | 63 km/h |
| primary | | 1 | 60 | | 30 km/h | 32 km/h |
| primary | | | | 60 | 63 km/h | 59 km/h |
| primary | | 1 | | 60 | 32 km/h | 30 km/h |
| primary | 15 | | 60 | | 59 km/h | 23 km/h |
| primary | 15 | 1 | 60 | | 30 km/h | 7 km/h |
| primary | 15 | | | 60 | 23 km/h | 59 km/h |
| primary | 15 | 1 | | 60 | 7 km/h | 30 km/h |
| primary | 15 | | 30 | 60 | 34 km/h | 59 km/h |
| primary | 15 | 1 | 30 | 60 | 15 km/h | 30 km/h |

View File

@ -10,13 +10,13 @@ Feature: Car - Street names in instructions
| | c | | | c |
And the ways And the ways
| nodes | name | | nodes | name | ref |
| ab | My Way | | ab | My Way | |
| bc | Your Way | | bc | Your Way | A1 |
When I route I should get When I route I should get
| from | to | route | | from | to | route |
| a | c | My Way,Your Way | | a | c | My Way,Your Way (A1) |
@todo @todo
Scenario: Car - Use way type to describe unnamed ways Scenario: Car - Use way type to describe unnamed ways

View File

@ -51,7 +51,7 @@ Feature: Foot - Access tags on ways
| no | | | | no | | |
| private | | | | private | | |
| agricultural | | | | agricultural | | |
| forestery | | | | forestry | | |
| | yes | x | | | yes | x |
| | permissive | x | | | permissive | x |
| | designated | x | | | designated | x |
@ -59,7 +59,7 @@ Feature: Foot - Access tags on ways
| | no | | | | no | |
| | private | | | | private | |
| | agricultural | | | | agricultural | |
| | forestery | | | | forestry | |
Scenario: Foot - Access tags on both node and way Scenario: Foot - Access tags on both node and way
Then routability should be Then routability should be

View File

@ -39,7 +39,7 @@ Feature: Foot - Access tags on nodes
| no | | | | no | | |
| private | | | | private | | |
| agricultural | | | | agricultural | | |
| forestery | | | | forestry | | |
| no | yes | x | | no | yes | x |
| no | permissive | x | | no | permissive | x |
| no | designated | x | | no | designated | x |
@ -47,4 +47,4 @@ Feature: Foot - Access tags on nodes
| yes | no | | | yes | no | |
| yes | private | | | yes | private | |
| yes | agricultural | | | yes | agricultural | |
| yes | forestery | | | yes | forestry | |

View File

@ -10,6 +10,6 @@ Feature: Foot - Surfaces
| footway | | 145s ~10% | | footway | | 145s ~10% |
| footway | fine_gravel | 193s ~10% | | footway | fine_gravel | 193s ~10% |
| footway | gravel | 193s ~10% | | footway | gravel | 193s ~10% |
| footway | pebbelstone | 193s ~10% | | footway | pebblestone | 193s ~10% |
| footway | mud | 289s ~10% | | footway | mud | 289s ~10% |
| footway | sand | 289s ~10% | | footway | sand | 289s ~10% |

View File

@ -30,3 +30,9 @@ Feature: Foot - Accessibility of different way types
| pier | x | | pier | x |
| cycleway | | | cycleway | |
| bridleway | | | bridleway | |
Scenario: Foot - Basic access
Then routability should be
| highway | leisure | forw |
| (nil) | track | x |

View File

@ -57,14 +57,13 @@ Feature: Locating Nearest node on a Way - pick closest way
Scenario: Nearest - High lat/lon Scenario: Nearest - High lat/lon
Given the node locations Given the node locations
| node | lat | lon | | node | lat | lon |
| a | -85 | -180 | | a | -85 | -180 |
| b | -85 | -160 | | b | -85 | -160 |
| c | -85 | -140 | | c | -85 | -140 |
| x | 75 | -180 | | x | -84.999 | -180 |
| y | 75 | -180 | | y | -84.999 | -160 |
| v | 1 | 1 | | z | -84.999 | -140 |
| w | -1 | -1 |
And the ways And the ways
| nodes | | nodes |
@ -73,6 +72,5 @@ Feature: Locating Nearest node on a Way - pick closest way
When I request nearest I should get When I request nearest I should get
| in | out | | in | out |
| x | a | | x | a |
| y | a | | y | b |
| v | c | | z | c |
| w | c |

View File

@ -17,7 +17,7 @@ Feature: osrm-prepare command line options: help
And stdout should contain "--profile" And stdout should contain "--profile"
And stdout should contain "--threads" And stdout should contain "--threads"
And stdout should contain 15 lines And stdout should contain 15 lines
And it should exit with code 0 And it should exit with code 1
Scenario: osrm-prepare - Help, short Scenario: osrm-prepare - Help, short
When I run "osrm-prepare -h" When I run "osrm-prepare -h"

View File

@ -174,3 +174,7 @@ end
Given /^data is loaded with datastore$/ do Given /^data is loaded with datastore$/ do
@load_method = 'datastore' @load_method = 'datastore'
end end
Given /^the HTTP method "([^"]*)"$/ do |method|
@http_method = method
end

View File

@ -52,5 +52,5 @@ When /^I request a travel time matrix I should get$/ do |table|
actual << r actual << r
end end
end end
table.routing_diff! actual table.diff! actual
end end

View File

@ -9,7 +9,7 @@ When /^I request locate I should get$/ do |table|
out_node = find_node_by_name row['out'] out_node = find_node_by_name row['out']
raise "*** unknown out-node '#{row['out']}" unless out_node raise "*** unknown out-node '#{row['out']}" unless out_node
response = request_locate("#{in_node.lat},#{in_node.lon}") response = request_locate(in_node)
if response.code == "200" && response.body.empty? == false if response.code == "200" && response.body.empty? == false
json = JSON.parse response.body json = JSON.parse response.body
if json['status'] == 0 if json['status'] == 0
@ -39,7 +39,7 @@ When /^I request locate I should get$/ do |table|
actual << got actual << got
end end
end end
table.routing_diff! actual table.diff! actual
end end
When /^I request locate (\d+) times I should get$/ do |n,table| When /^I request locate (\d+) times I should get$/ do |n,table|

View File

@ -91,6 +91,6 @@ When /^I match I should get$/ do |table|
actual << got actual << got
end end
end end
table.routing_diff! actual table.diff! actual
end end

View File

@ -9,7 +9,7 @@ When /^I request nearest I should get$/ do |table|
out_node = find_node_by_name row['out'] out_node = find_node_by_name row['out']
raise "*** unknown out-node '#{row['out']}" unless out_node raise "*** unknown out-node '#{row['out']}" unless out_node
response = request_nearest("#{in_node.lat},#{in_node.lon}") response = request_nearest(in_node)
if response.code == "200" && response.body.empty? == false if response.code == "200" && response.body.empty? == false
json = JSON.parse response.body json = JSON.parse response.body
if json['status'] == 0 if json['status'] == 0
@ -39,7 +39,7 @@ When /^I request nearest I should get$/ do |table|
actual << got actual << got
end end
end end
table.routing_diff! actual table.diff! actual
end end
When /^I request nearest (\d+) times I should get$/ do |n,table| When /^I request nearest (\d+) times I should get$/ do |n,table|

View File

@ -74,5 +74,5 @@ Then /^routability should be$/ do |table|
actual << output_row actual << output_row
end end
end end
table.routing_diff! actual table.diff! actual
end end

View File

@ -140,7 +140,7 @@ When /^I route I should get$/ do |table|
actual << got actual << got
end end
end end
table.routing_diff! actual table.diff! actual
end end
When /^I route (\d+) times I should get$/ do |n,table| When /^I route (\d+) times I should get$/ do |n,table|

View File

@ -1,76 +0,0 @@
#monkey patch cucumber table class to reorder output.
#we always want failed rows to be shown right below the expected row.
class Cucumber::Ast::Table
def routing_diff!(other_table, options={})
options = {:missing_row => true, :surplus_row => true, :missing_col => true, :surplus_col => false}.merge(options)
other_table = ensure_table(other_table)
other_table.convert_headers!
other_table.convert_columns!
ensure_green!
convert_headers!
convert_columns!
original_width = cell_matrix[0].length
other_table_cell_matrix = pad!(other_table.cell_matrix)
padded_width = cell_matrix[0].length
missing_col = cell_matrix[0].detect{|cell| cell.status == :undefined}
surplus_col = padded_width > original_width
require_diff_lcs
cell_matrix.extend(Diff::LCS)
changes = cell_matrix.diff(other_table_cell_matrix).flatten
inserted = 0
missing = 0
row_indices = Array.new(other_table_cell_matrix.length) {|n| n}
last_change = nil
missing_row_pos = nil
insert_row_pos = nil
changes.each do |change|
if(change.action == '-')
missing_row_pos = change.position + inserted
cell_matrix[missing_row_pos].each{|cell| cell.status = :undefined}
row_indices.insert(missing_row_pos, nil)
missing += 1
else # '+'
#change index so we interleave instead
insert_row_pos = change.position + inserted + 1
#insert_row_pos = change.position + missing #original
inserted_row = change.element
inserted_row.each{|cell| cell.status = :comment}
cell_matrix.insert(insert_row_pos, inserted_row)
row_indices[insert_row_pos] = nil
inspect_rows(cell_matrix[missing_row_pos], inserted_row) if last_change && last_change.action == '-'
inserted += 1
end
last_change = change
end
other_table_cell_matrix.each_with_index do |other_row, i|
row_index = row_indices.index(i)
row = cell_matrix[row_index] if row_index
if row
(original_width..padded_width).each do |col_index|
surplus_cell = other_row[col_index]
row[col_index].value = surplus_cell.value if row[col_index]
end
end
end
clear_cache!
should_raise =
missing_row_pos && options[:missing_row] ||
insert_row_pos && options[:surplus_row] ||
missing_col && options[:missing_col] ||
surplus_col && options[:surplus_col]
raise Different.new(self) if should_raise
end
end

View File

@ -22,10 +22,10 @@ SHUTDOWN_TIMEOUT = 10
DEFAULT_LOAD_METHOD = 'datastore' DEFAULT_LOAD_METHOD = 'datastore'
OSRM_ROUTED_LOG_FILE = 'osrm-routed.log' OSRM_ROUTED_LOG_FILE = 'osrm-routed.log'
if ENV['OS']==/Windows.*/ then if ENV['OS']=~/Windows.*/ then
TERMSIGNAL='TERM'
else
TERMSIGNAL=9 TERMSIGNAL=9
else
TERMSIGNAL='TERM'
end end
@ -72,8 +72,8 @@ end
def verify_existance_of_binaries def verify_existance_of_binaries
["osrm-extract", "osrm-prepare", "osrm-routed"].each do |bin| ["osrm-extract", "osrm-prepare", "osrm-routed"].each do |bin|
unless File.exists? "#{BIN_PATH}/#{bin}" unless File.exists? "#{BIN_PATH}/#{bin}#{EXE}"
raise "*** #{BIN_PATH}/#{bin} is missing. Build failed?" raise "*** #{BIN_PATH}/#{bin}#{EXE} is missing. Build failed?"
end end
end end
end end

View File

@ -4,22 +4,16 @@ STRESS_TIMEOUT = 300
Before do |scenario| Before do |scenario|
# feature name # fetch scenario and feature name, so we can use it in log files if needed
case scenario case scenario
when Cucumber::Ast::Scenario when Cucumber::RunningTestCase::Scenario
@feature_name = scenario.feature.name @feature_name = scenario.feature.name
when Cucumber::Ast::OutlineTable::ExampleRow
@feature_name = scenario.scenario_outline.feature.name
end
# scenario name
case scenario
when Cucumber::Ast::Scenario
@scenario_title = scenario.name @scenario_title = scenario.name
when Cucumber::Ast::OutlineTable::ExampleRow when Cucumber::RunningTestCase::ExampleRow
@feature_name = scenario.scenario_outline.feature.name
@scenario_title = scenario.scenario_outline.name @scenario_title = scenario.scenario_outline.name
end end
@load_method = DEFAULT_LOAD_METHOD @load_method = DEFAULT_LOAD_METHOD
@query_params = {} @query_params = {}
@scenario_time = Time.now.strftime("%Y-%m-%dT%H:%m:%SZ") @scenario_time = Time.now.strftime("%Y-%m-%dT%H:%m:%SZ")

34
features/support/http.rb Normal file
View File

@ -0,0 +1,34 @@
require 'net/http'
def generate_request_url path
if @http_method.eql? "POST"
pos = path.index('?') - 1
service = path[0..pos]
uri = URI.parse "#{HOST}/#{service}"
else
uri = URI.parse "#{HOST}/#{path}"
end
end
def send_request uri, waypoints=[], options={}, timestamps=[]
@query = uri.to_s
Timeout.timeout(OSRM_TIMEOUT) do
if @http_method.eql? "POST"
datas = {}
if waypoints.length > 0
datas[:loc] = waypoints.compact.map { |w| "#{w.lat},#{w.lon}" }
end
if timestamps.length > 0
datas[:t] = timestamps.compact.map { |t| "#{t}" }
end
datas.merge! options
response = Net::HTTP.post_form uri, datas
else
response = Net::HTTP.get_response uri
end
end
rescue Errno::ECONNREFUSED => e
raise "*** osrm-routed is not running."
rescue Timeout::Error
raise "*** osrm-routed did not respond."
end

View File

@ -1,17 +1,12 @@
require 'net/http' require 'net/http'
def request_locate_url path def request_locate_url path, node
@query = path @query = path
uri = URI.parse "#{HOST}/#{path}"
Timeout.timeout(OSRM_TIMEOUT) do uri = generate_request_url path
Net::HTTP.get_response uri response = send_request uri, [node]
end
rescue Errno::ECONNREFUSED => e
raise "*** osrm-routed is not running."
rescue Timeout::Error
raise "*** osrm-routed did not respond."
end end
def request_locate a def request_locate node
request_locate_url "locate?loc=#{a}" request_locate_url "locate?loc=#{node.lat},#{node.lon}", node
end end

View File

@ -13,14 +13,8 @@ def request_matching trace=[], timestamps=[], options={}
end end
params = (trace_params + defaults.merge(options).to_param).join('&') params = (trace_params + defaults.merge(options).to_param).join('&')
params = nil if params=="" params = nil if params==""
uri = URI.parse ["#{HOST}/match", params].compact.join('?')
@query = uri.to_s uri = generate_request_url ("match" + '?' + params)
Timeout.timeout(OSRM_TIMEOUT) do response = send_request uri, trace, options, timestamps
Net::HTTP.get_response uri
end
rescue Errno::ECONNREFUSED => e
raise "*** osrm-routed is not running."
rescue Timeout::Error
raise "*** osrm-routed did not respond."
end end

View File

@ -1,17 +1,12 @@
require 'net/http' require 'net/http'
def request_nearest_url path def request_nearest_url path, node
@query = path @query = path
uri = URI.parse "#{HOST}/#{path}"
Timeout.timeout(OSRM_TIMEOUT) do uri = generate_request_url path
Net::HTTP.get_response uri response = send_request uri, [node]
end
rescue Errno::ECONNREFUSED => e
raise "*** osrm-routed is not running."
rescue Timeout::Error
raise "*** osrm-routed did not respond."
end end
def request_nearest a def request_nearest node
request_nearest_url "nearest?loc=#{a}" request_nearest_url "nearest?loc=#{node.lat},#{node.lon}", node
end end

View File

@ -15,15 +15,13 @@ def request_path path, waypoints=[], options={}
locs = waypoints.compact.map { |w| "loc=#{w.lat},#{w.lon}" } locs = waypoints.compact.map { |w| "loc=#{w.lat},#{w.lon}" }
params = (locs + options.to_param).join('&') params = (locs + options.to_param).join('&')
params = nil if params=="" params = nil if params==""
uri = URI.parse ["#{HOST}/#{path}", params].compact.join('?')
@query = uri.to_s if params == nil
Timeout.timeout(OSRM_TIMEOUT) do uri = generate_request_url (path)
Net::HTTP.get_response uri else
uri = generate_request_url (path + '?' + params)
end end
rescue Errno::ECONNREFUSED => e response = send_request uri, waypoints, options
raise "*** osrm-routed is not running."
rescue Timeout::Error
raise "*** osrm-routed did not respond."
end end
def request_url path def request_url path

View File

@ -173,3 +173,33 @@ Feature: Testbot - Handle ferry routes
| from | to | route | time | | from | to | route | time |
| a | g | abcdefg | 23400s +-2 | | a | g | abcdefg | 23400s +-2 |
| g | a | abcdefg | 23400s +-2 | | g | a | abcdefg | 23400s +-2 |
@todo
Scenario: Testbot - Ferry duration formats
Given the node map
| a | c | e | g | i | k | m | o | q | s |
| b | d | f | h | j | l | n | p | r | t |
And the ways
| nodes | route | duration |
| ab | ferry | 0:01 |
| cd | ferry | 00:01 |
| ef | ferry | 1:00 |
| gh | ferry | 01:00 |
| ij | ferry | 02:20 |
| kl | ferry | 10:00 |
| mn | ferry | 100:00 |
| op | ferry | 1000:00 |
| qr | ferry | 10000:00 |
When I route I should get
| from | to | route | time |
| a | b | ab | 60s +-1 |
| c | d | cd | 60s +-1 |
| e | f | ef | 3600s +-1 |
| g | h | gh | 3600s +-1 |
| i | j | ij | 8400s +-1 |
| k | l | kl | 36000s +-1 |
| m | n | mn | 360000s +-1 |
| o | p | mn | 3600000s +-1 |
| q | r | mn | 36000000s +-1 |

View File

@ -4,6 +4,25 @@ Feature: Avoid weird loops caused by rounding errors
Background: Background:
Given the profile "testbot" Given the profile "testbot"
Scenario: Weird sidestreet loops
Given the node map
| a | 1 | b | 2 | c | 3 | d |
| | | | | | | |
| e | | f | | g | | h |
And the ways
| nodes |
| aefghd |
| abcd |
| bf |
| cg |
When I route I should get
| waypoints | route | turns |
| a,1,d | abcd,abcd | head,via,destination |
| a,2,d | abcd,abcd | head,via,destination |
| a,3,d | abcd,abcd | head,via,destination |
Scenario: Avoid weird loops 1 Scenario: Avoid weird loops 1
Given the node locations Given the node locations
| node | lat | lon | | node | lat | lon |

View File

@ -0,0 +1,100 @@
@post @testbot
Feature: POST request
Background:
Given the profile "testbot"
And the HTTP method "POST"
Scenario: Testbot - viaroute POST request
Given the node locations
| node | lat | lon |
| a | 55.68740 | 12.52430 |
| b | 55.68745 | 12.52409 |
| c | 55.68711 | 12.52383 |
| x | -55.68740 | 12.52430 |
| y | -55.68745 | 12.52409 |
| z | -55.68711 | 12.52383 |
And the ways
| nodes |
| ab |
| bc |
| xy |
| yz |
When I route I should get
| from | to | route | turns |
| a | c | ab,bc | head,left,destination |
| c | a | bc,ab | head,right,destination |
| x | z | xy,yz | head,right,destination |
| z | x | yz,xy | head,left,destination |
Scenario: Testbot - match POST request
Given the node map
| a | b | c | d |
| e | f | g | h |
And the ways
| nodes | oneway |
| abcd | yes |
| hgfe | yes |
When I match I should get
| trace | matchings |
| dcba | hgfe |
Scenario: Testbot - table POST request
Given the node map
| x | a | b | y |
| | d | e | |
And the ways
| nodes | oneway |
| abeda | yes |
| xa | |
| by | |
When I request a travel time matrix I should get
| | x | y | d | e |
| x | 0 | 300 | 400 | 300 |
| y | 500 | 0 | 300 | 200 |
| d | 200 | 300 | 0 | 300 |
| e | 300 | 400 | 100 | 0 |
Scenario: Testbot - locate POST request
Given the node locations
| node | lat | lon |
| a | -85 | -180 |
| b | 0 | 0 |
| c | 85 | 180 |
| x | -84 | -180 |
| y | 84 | 180 |
And the ways
| nodes |
| abc |
When I request locate I should get
| in | out |
| x | a |
| y | c |
Scenario: Testbot - nearest POST request
Given the node locations
| node | lat | lon |
| a | -85 | -180 |
| b | -85 | -160 |
| c | -85 | -140 |
| x | -84.999 | -180 |
| y | -84.999 | -160 |
| z | -84.999 | -140 |
And the ways
| nodes |
| abc |
When I request nearest I should get
| in | out |
| x | a |
| y | b |
| z | c |

View File

@ -98,30 +98,26 @@ Feature: Snap start/end point to the nearest way
| b | x | xb | | b | x | xb |
| c | x | xc | | c | x | xc |
Scenario: Find edges within 1km, and the same from 10km Scenario: Find edges within 100m, and the same from 1km
Given a grid size of 1000 meters Given a grid size of 100 meters
Given the node map Given the node map
| p | | | | | | | | | | | i | | | | | | | | | | | j | | p | | | | | | | | i | | | | | | | | j |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | 8 | | 1 | | 2 | | | | | | |
| | | | | | | | | | 8 | | 1 | | 2 | | | | | | | | | | | | | | | | | | h | a | b | | | | | | | |
| | | | | | | | | | | h | a | b | | | | | | | | | | | | o | | | | | | 7 | g | x | c | 3 | | | | | | k |
| o | | | | | | | | | 7 | g | x | c | 3 | | | | | | | | | k | | | | | | | | | f | e | d | | | | | | | |
| | | | | | | | | | | f | e | d | | | | | | | | | | | | | | | | | | 6 | | 5 | | 4 | | | | | | |
| | | | | | | | | | 6 | | 5 | | 4 | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | | | n | | | | | | | | m | | | | | | | | l |
| | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | |
| n | | | | | | | | | | | m | | | | | | | | | | | l |
Given the ways Given the ways
| nodes | | nodes |

View File

@ -52,6 +52,21 @@ Feature: Via points
| a,c,f | ab,bcd,bcd,de,efg | | a,c,f | ab,bcd,bcd,de,efg |
| a,c,f,h | ab,bcd,bcd,de,efg,efg,gh | | a,c,f,h | ab,bcd,bcd,de,efg,efg,gh |
Scenario: Duplicate via point
Given the node map
| x | | | | | |
| a | 1 | 2 | 3 | 4 | b |
| | | | | | |
And the ways
| nodes |
| xa |
| ab |
When I route I should get
| waypoints | route | turns |
| 1,1,4 | ab,ab | head,via,destination |
Scenario: Via points on ring of oneways Scenario: Via points on ring of oneways
# xa is to avoid only having a single ring, which can trigger edge cases # xa is to avoid only having a single ring, which can trigger edge cases
Given the node map Given the node map
@ -77,7 +92,6 @@ Feature: Via points
| 1,3,2 | ab,bc,cd,cd,de,ef,fa,ab,bc | 1600m +-1 | head,straight,straight,via,right,right,right,right,straight,destination | | 1,3,2 | ab,bc,cd,cd,de,ef,fa,ab,bc | 1600m +-1 | head,straight,straight,via,right,right,right,right,straight,destination |
| 3,2,1 | cd,de,ef,fa,ab,bc,bc,cd,de,ef,fa,ab | 2400m +-1 | head,right,right,right,right,straight,via,straight,right,right,right,right,destination | | 3,2,1 | cd,de,ef,fa,ab,bc,bc,cd,de,ef,fa,ab | 2400m +-1 | head,right,right,right,right,straight,via,straight,right,right,right,right,destination |
@bug
Scenario: Via points on ring on the same oneway Scenario: Via points on ring on the same oneway
# xa is to avoid only having a single ring, which can trigger edge cases # xa is to avoid only having a single ring, which can trigger edge cases
Given the node map Given the node map
@ -97,6 +111,6 @@ Feature: Via points
| waypoints | route | distance | turns | | waypoints | route | distance | turns |
| 1,3 | ab | 200m +-1 | head,destination | | 1,3 | ab | 200m +-1 | head,destination |
| 3,1 | ab,bc,cd,da,ab | 800m +-1 | head,right,right,right,right,destination | | 3,1 | ab,bc,cd,da,ab | 800m +-1 | head,right,right,right,right,destination |
| 1,2,3 | ab | 200m +-1 | head,destination | | 1,2,3 | ab,ab | 200m +-1 | head,via,destination |
| 1,3,2 | ab,bc,cd,da,ab | 1100m +-1 | head,right,right,right,right,destination | | 1,3,2 | ab,ab,bc,cd,da,ab | 1100m +-1 | head,via,right,right,right,right,destination |
| 3,2,1 | ab,bc,cd,da,ab,bc,cd,da,ab | 1600m +-1 | head,right,right,right,right,right,right,right,right,destination | | 3,2,1 | ab,bc,cd,da,ab,ab,bc,cd,da,ab | 1800m | head,right,right,right,right,via,right,right,right,right,destination |

View File

@ -78,6 +78,8 @@ struct RouteParameters
void setCompressionFlag(const bool flag); void setCompressionFlag(const bool flag);
void addCoordinate(const boost::fusion::vector<double, double> &received_coordinates); void addCoordinate(const boost::fusion::vector<double, double> &received_coordinates);
void getCoordinatesFromGeometry(const std::string geometry_string);
short zoom_level; short zoom_level;
bool print_instructions; bool print_instructions;

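The getCoordinatesFromGeometry hook added above pairs with the new `locs` rule in the API grammar further down: instead of repeating loc=lat,lon pairs, a client can send one encoded polyline and have it expanded into the same coordinate list. A minimal sketch of that expansion, reusing the PolylineCompressor::decode_string call exercised by the unit test near the end of this diff (the free-function wrapper and its name are assumptions, not the actual member implementation; include paths follow the unit test):

#include <string>
#include <vector>

#include "../../algorithms/polyline_compressor.hpp"
#include <osrm/coordinate.hpp>

// Sketch only: decode a "locs=" polyline into the fixed-point coordinates
// that repeated loc= parameters would otherwise produce.
std::vector<FixedPointCoordinate> coordinatesFromGeometry(const std::string &geometry_string)
{
    PolylineCompressor pc;
    return pc.decode_string(geometry_string);
}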
View File

@ -26,10 +26,13 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#include "contractor/processing_chain.hpp" #include "contractor/processing_chain.hpp"
#include "contractor/contractor_options.hpp"
#include "util/simple_logger.hpp" #include "util/simple_logger.hpp"
#include <boost/program_options/errors.hpp> #include <boost/program_options/errors.hpp>
#include <tbb/task_scheduler_init.h>
#include <exception> #include <exception>
#include <ostream> #include <ostream>
@ -37,21 +40,64 @@ int main(int argc, char *argv[])
{ {
try try
{ {
return Prepare().Process(argc, argv); LogPolicy::GetInstance().Unmute();
} ContractorConfig contractor_config;
catch (boost::program_options::too_many_positional_options_error &)
{ const return_code result = ContractorOptions::ParseArguments(argc, argv, contractor_config);
SimpleLogger().Write(logWARNING) << "Only one file can be specified";
return 1; if (return_code::fail == result)
} {
catch (boost::program_options::error &e) return 1;
{ }
SimpleLogger().Write(logWARNING) << e.what();
return 1; if (return_code::exit == result)
{
return 0;
}
ContractorOptions::GenerateOutputFilesNames(contractor_config);
if (1 > contractor_config.requested_num_threads)
{
SimpleLogger().Write(logWARNING) << "Number of threads must be 1 or larger";
return 1;
}
const unsigned recommended_num_threads = tbb::task_scheduler_init::default_num_threads();
if (recommended_num_threads != contractor_config.requested_num_threads)
{
SimpleLogger().Write(logWARNING) << "The recommended number of threads is "
<< recommended_num_threads
<< "! This setting may have performance side-effects.";
}
if (!boost::filesystem::is_regular_file(contractor_config.osrm_input_path))
{
SimpleLogger().Write(logWARNING)
<< "Input file " << contractor_config.osrm_input_path.string() << " not found!";
return 1;
}
if (!boost::filesystem::is_regular_file(contractor_config.profile_path))
{
SimpleLogger().Write(logWARNING) << "Profile " << contractor_config.profile_path.string()
<< " not found!";
return 1;
}
SimpleLogger().Write() << "Input file: " << contractor_config.osrm_input_path.filename().string();
SimpleLogger().Write() << "Restrictions file: " << contractor_config.restrictions_path.filename().string();
SimpleLogger().Write() << "Profile: " << contractor_config.profile_path.filename().string();
SimpleLogger().Write() << "Threads: " << contractor_config.requested_num_threads;
tbb::task_scheduler_init init(contractor_config.requested_num_threads);
return Prepare(contractor_config).Run();
} }
catch (const std::exception &e) catch (const std::exception &e)
{ {
SimpleLogger().Write(logWARNING) << "Exception occured: " << e.what() << std::endl; SimpleLogger().Write(logWARNING) << "[exception] " << e.what();
return 1; return 1;
} }
} }

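The rewritten main above validates the requested thread count against TBB's default before constructing the task scheduler and running the contraction. A small standalone illustration of that pattern (the thread count and the dummy loop are made up, not taken from the contractor):

#include <tbb/parallel_for.h>
#include <tbb/task_scheduler_init.h>

#include <iostream>

int main()
{
    const int requested_num_threads = 2; // stands in for contractor_config.requested_num_threads
    const int recommended_num_threads = tbb::task_scheduler_init::default_num_threads();

    if (requested_num_threads != recommended_num_threads)
    {
        std::cout << "The recommended number of threads is " << recommended_num_threads << "\n";
    }

    // all TBB parallelism below is now capped at the requested thread count
    tbb::task_scheduler_init init(requested_num_threads);
    tbb::parallel_for(0, 8, [](int i) { std::cout << "work item " << i << "\n"; });
    return 0;
}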
View File

@ -1,19 +1,21 @@
require("lib/access") -- Bicycle profile
require("lib/maxspeed")
local find_access_tag = require("lib/access").find_access_tag
local limit = require("lib/maxspeed").limit
-- Begin of globals -- Begin of globals
barrier_whitelist = { [""] = true, ["cycle_barrier"] = true, ["bollard"] = true, ["entrance"] = true, ["cattle_grid"] = true, ["border_control"] = true, ["toll_booth"] = true, ["sally_port"] = true, ["gate"] = true, ["no"] = true } barrier_whitelist = { [""] = true, ["cycle_barrier"] = true, ["bollard"] = true, ["entrance"] = true, ["cattle_grid"] = true, ["border_control"] = true, ["toll_booth"] = true, ["sally_port"] = true, ["gate"] = true, ["no"] = true }
access_tag_whitelist = { ["yes"] = true, ["permissive"] = true, ["designated"] = true } access_tag_whitelist = { ["yes"] = true, ["permissive"] = true, ["designated"] = true }
access_tag_blacklist = { ["no"] = true, ["private"] = true, ["agricultural"] = true, ["forestery"] = true } access_tag_blacklist = { ["no"] = true, ["private"] = true, ["agricultural"] = true, ["forestry"] = true }
access_tag_restricted = { ["destination"] = true, ["delivery"] = true } access_tag_restricted = { ["destination"] = true, ["delivery"] = true }
access_tags_hierachy = { "bicycle", "vehicle", "access" } access_tags_hierachy = { "bicycle", "vehicle", "access" }
cycleway_tags = {["track"]=true,["lane"]=true,["opposite"]=true,["opposite_lane"]=true,["opposite_track"]=true,["share_busway"]=true,["sharrow"]=true,["shared"]=true } cycleway_tags = {["track"]=true,["lane"]=true,["opposite"]=true,["opposite_lane"]=true,["opposite_track"]=true,["share_busway"]=true,["sharrow"]=true,["shared"]=true }
service_tag_restricted = { ["parking_aisle"] = true } service_tag_restricted = { ["parking_aisle"] = true }
restriction_exception_tags = { "bicycle", "vehicle", "access" } restriction_exception_tags = { "bicycle", "vehicle", "access" }
unsafe_highway_list = { ["primary"] = true, ["secondary"] = true, ["tertiary"] = true, ["primary_link"] = true, ["secondary_link"] = true, ["tertiary_link"] = true}
default_speed = 15 local default_speed = 15
local walking_speed = 6
walking_speed = 6
bicycle_speeds = { bicycle_speeds = {
["cycleway"] = default_speed, ["cycleway"] = default_speed,
@ -79,8 +81,7 @@ surface_speeds = {
["unpaved"] = 6, ["unpaved"] = 6,
["fine_gravel"] = 6, ["fine_gravel"] = 6,
["gravel"] = 6, ["gravel"] = 6,
["fine_gravel"] = 6, ["pebblestone"] = 6,
["pebbelstone"] = 6,
["ground"] = 6, ["ground"] = 6,
["dirt"] = 6, ["dirt"] = 6,
["earth"] = 6, ["earth"] = 6,
@ -89,24 +90,27 @@ surface_speeds = {
["sand"] = 3 ["sand"] = 3
} }
take_minimum_of_speeds = true traffic_signal_penalty = 2
obey_oneway = true use_turn_restrictions = false
obey_bollards = false
use_restrictions = true
ignore_areas = true -- future feature
traffic_signal_penalty = 5
u_turn_penalty = 20
use_turn_restrictions = false
turn_penalty = 60
turn_bias = 1.4
local obey_oneway = true
local obey_bollards = false
local ignore_areas = true
local u_turn_penalty = 20
local turn_penalty = 60
local turn_bias = 1.4
-- reduce the driving speed by 30% for unsafe roads
-- local safety_penalty = 0.7
local safety_penalty = 1.0
local use_public_transport = true
local fallback_names = true
--modes --modes
mode_normal = 1 local mode_normal = 1
mode_pushing = 2 local mode_pushing = 2
mode_ferry = 3 local mode_ferry = 3
mode_train = 4 local mode_train = 4
mode_movable_bridge = 5 local mode_movable_bridge = 5
local function parse_maxspeed(source) local function parse_maxspeed(source)
if not source then if not source then
@ -122,7 +126,6 @@ local function parse_maxspeed(source)
return n return n
end end
function get_exceptions(vector) function get_exceptions(vector)
for i,v in ipairs(restriction_exception_tags) do for i,v in ipairs(restriction_exception_tags) do
vector:Add(v) vector:Add(v)
@ -130,29 +133,31 @@ function get_exceptions(vector)
end end
function node_function (node, result) function node_function (node, result)
local barrier = node:get_value_by_key("barrier") -- parse access and barrier tags
local access = Access.find_access_tag(node, access_tags_hierachy) local highway = node:get_value_by_key("highway")
local traffic_signal = node:get_value_by_key("highway") local is_crossing = highway and highway == "crossing"
-- flag node if it carries a traffic light local access = find_access_tag(node, access_tags_hierachy)
if traffic_signal and traffic_signal == "traffic_signals" then if access and access ~= "" then
result.traffic_lights = true -- access restrictions on crossing nodes are not relevant for
end -- the traffic on the road
if access_tag_blacklist[access] and not is_crossing then
result.barrier = true
end
else
local barrier = node:get_value_by_key("barrier")
if barrier and "" ~= barrier then
if not barrier_whitelist[barrier] then
result.barrier = true
end
end
end
-- parse access and barrier tags -- check if node is a traffic light
if access and access ~= "" then local tag = node:get_value_by_key("highway")
if access_tag_blacklist[access] then if tag and "traffic_signals" == tag then
result.barrier = true result.traffic_lights = true;
else end
result.barrier = false
end
elseif barrier and barrier ~= "" then
if barrier_whitelist[barrier] then
result.barrier = false
else
result.barrier = true
end
end
end end
function way_function (way, result) function way_function (way, result)
@ -165,8 +170,8 @@ function way_function (way, result)
local public_transport = way:get_value_by_key("public_transport") local public_transport = way:get_value_by_key("public_transport")
local bridge = way:get_value_by_key("bridge") local bridge = way:get_value_by_key("bridge")
if (not highway or highway == '') and if (not highway or highway == '') and
(not route or route == '') and (not use_public_transport or not route or route == '') and
(not railway or railway=='') and (not use_public_transport or not railway or railway=='') and
(not amenity or amenity=='') and (not amenity or amenity=='') and
(not man_made or man_made=='') and (not man_made or man_made=='') and
(not public_transport or public_transport=='') and (not public_transport or public_transport=='') and
@ -181,7 +186,7 @@ function way_function (way, result)
end end
-- access -- access
local access = Access.find_access_tag(way, access_tags_hierachy) local access = find_access_tag(way, access_tags_hierachy)
if access and access_tag_blacklist[access] then if access and access_tag_blacklist[access] then
return return
end end
@ -213,7 +218,8 @@ function way_function (way, result)
result.name = ref result.name = ref
elseif name and "" ~= name then elseif name and "" ~= name then
result.name = name result.name = name
elseif highway then -- TODO find a better solution for encoding way type
elseif fallback_names and highway then
-- if no name exists, use way type -- this encoding scheme is expected to be a temporary solution
-- this encoding scheme is excepted to be a temporary solution -- this encoding scheme is excepted to be a temporary solution
result.name = "{highway:"..highway.."}" result.name = "{highway:"..highway.."}"
@ -246,15 +252,16 @@ function way_function (way, result)
result.forward_speed = route_speeds[route] result.forward_speed = route_speeds[route]
result.backward_speed = route_speeds[route] result.backward_speed = route_speeds[route]
end end
elseif railway and platform_speeds[railway] then -- public transport
elseif use_public_transport and railway and platform_speeds[railway] then
-- railway platforms (old tagging scheme) -- railway platforms (old tagging scheme)
result.forward_speed = platform_speeds[railway] result.forward_speed = platform_speeds[railway]
result.backward_speed = platform_speeds[railway] result.backward_speed = platform_speeds[railway]
elseif platform_speeds[public_transport] then elseif use_public_transport and platform_speeds[public_transport] then
-- public_transport platforms (new tagging platform) -- public_transport platforms (new tagging platform)
result.forward_speed = platform_speeds[public_transport] result.forward_speed = platform_speeds[public_transport]
result.backward_speed = platform_speeds[public_transport] result.backward_speed = platform_speeds[public_transport]
elseif railway and railway_speeds[railway] then elseif use_public_transport and railway and railway_speeds[railway] then
result.forward_mode = mode_train result.forward_mode = mode_train
result.backward_mode = mode_train result.backward_mode = mode_train
-- railways -- railways
@ -270,6 +277,10 @@ function way_function (way, result)
-- regular ways -- regular ways
result.forward_speed = bicycle_speeds[highway] result.forward_speed = bicycle_speeds[highway]
result.backward_speed = bicycle_speeds[highway] result.backward_speed = bicycle_speeds[highway]
if safety_penalty < 1 and unsafe_highway_list[highway] then
result.forward_speed = result.forward_speed * safety_penalty
result.backward_speed = result.backward_speed * safety_penalty
end
elseif access and access_tag_whitelist[access] then elseif access and access_tag_whitelist[access] then
-- unknown way, but valid access tag -- unknown way, but valid access tag
result.forward_speed = default_speed result.forward_speed = default_speed
@ -391,7 +402,7 @@ function way_function (way, result)
end end
-- maxspeed -- maxspeed
MaxSpeed.limit( result, maxspeed, maxspeed_forward, maxspeed_backward ) limit( result, maxspeed, maxspeed_forward, maxspeed_backward )
end end
function turn_function (angle) function turn_function (angle)

View File

@ -1,6 +1,8 @@
-- Begin of globals -- Car profile
--require("lib/access") --function temporarily inlined
local find_access_tag = require("lib/access").find_access_tag
-- Begin of globals
barrier_whitelist = { ["cattle_grid"] = true, ["border_control"] = true, ["checkpoint"] = true, ["toll_booth"] = true, ["sally_port"] = true, ["gate"] = true, ["lift_gate"] = true, ["no"] = true, ["entrance"] = true } barrier_whitelist = { ["cattle_grid"] = true, ["border_control"] = true, ["checkpoint"] = true, ["toll_booth"] = true, ["sally_port"] = true, ["gate"] = true, ["lift_gate"] = true, ["no"] = true, ["entrance"] = true }
access_tag_whitelist = { ["yes"] = true, ["motorcar"] = true, ["motor_vehicle"] = true, ["vehicle"] = true, ["permissive"] = true, ["designated"] = true } access_tag_whitelist = { ["yes"] = true, ["motorcar"] = true, ["motor_vehicle"] = true, ["vehicle"] = true, ["permissive"] = true, ["designated"] = true }
access_tag_blacklist = { ["no"] = true, ["private"] = true, ["agricultural"] = true, ["forestry"] = true, ["emergency"] = true, ["psv"] = true } access_tag_blacklist = { ["no"] = true, ["private"] = true, ["agricultural"] = true, ["forestry"] = true, ["emergency"] = true, ["psv"] = true }
@ -130,10 +132,9 @@ maxspeed_table = {
traffic_signal_penalty = 2 traffic_signal_penalty = 2
use_turn_restrictions = true use_turn_restrictions = true
local take_minimum_of_speeds = false
local obey_oneway = true local obey_oneway = true
local obey_bollards = true local obey_bollards = true
local ignore_areas = true -- future feature local ignore_areas = true
local u_turn_penalty = 20 local u_turn_penalty = 20
local abs = math.abs local abs = math.abs
@ -147,16 +148,6 @@ local mode_normal = 1
local mode_ferry = 2 local mode_ferry = 2
local mode_movable_bridge = 3 local mode_movable_bridge = 3
local function find_access_tag(source, access_tags_hierachy)
for i,v in ipairs(access_tags_hierachy) do
local access_tag = source:get_value_by_key(v)
if access_tag and "" ~= access_tag then
return access_tag
end
end
return ""
end
function get_exceptions(vector) function get_exceptions(vector)
for i,v in ipairs(restriction_exception_tags) do for i,v in ipairs(restriction_exception_tags) do
vector:Add(v) vector:Add(v)
@ -187,6 +178,7 @@ local function parse_maxspeed(source)
return n return n
end end
-- FIXME Why was this commented out?
-- function turn_function (angle) -- function turn_function (angle)
-- -- print ("called at angle " .. angle ) -- -- print ("called at angle " .. angle )
-- local index = math.abs(math.floor(angle/10+0.5))+1 -- +1 'coz LUA starts as idx 1 -- local index = math.abs(math.floor(angle/10+0.5))+1 -- +1 'coz LUA starts as idx 1
@ -198,7 +190,7 @@ end
function node_function (node, result) function node_function (node, result)
-- parse access and barrier tags -- parse access and barrier tags
local access = find_access_tag(node, access_tags_hierachy) local access = find_access_tag(node, access_tags_hierachy)
if access ~= "" then if access and access ~= "" then
if access_tag_blacklist[access] then if access_tag_blacklist[access] then
result.barrier = true result.barrier = true
end end
@ -249,12 +241,6 @@ function way_function (way, result)
return return
end end
local width = math.huge
local width_string = way:get_value_by_key("width")
if width_string and tonumber(width_string:match("%d*")) then
width = tonumber(width_string:match("%d*"))
end
-- Check if we are allowed to access the way -- Check if we are allowed to access the way
local access = find_access_tag(way, access_tags_hierachy) local access = find_access_tag(way, access_tags_hierachy)
if access_tag_blacklist[access] then if access_tag_blacklist[access] then
@ -353,9 +339,14 @@ function way_function (way, result)
local service = way:get_value_by_key("service") local service = way:get_value_by_key("service")
-- Set the name that will be used for instructions -- Set the name that will be used for instructions
if ref and "" ~= ref then local has_ref = ref and "" ~= ref
local has_name = name and "" ~= name
if has_name and has_ref then
result.name = name .. " (" .. ref .. ")"
elseif has_ref then
result.name = ref result.name = ref
elseif name and "" ~= name then elseif has_name then
result.name = name result.name = name
-- else -- else
-- result.name = highway -- if no name exists, use way type -- result.name = highway -- if no name exists, use way type
@ -407,12 +398,25 @@ function way_function (way, result)
result.ignore_in_grid = true result.ignore_in_grid = true
end end
local width = math.huge
local lanes = math.huge
if result.forward_speed > 0 or result.backward_speed > 0 then
local width_string = way:get_value_by_key("width")
if width_string and tonumber(width_string:match("%d*")) then
width = tonumber(width_string:match("%d*"))
end
local lanes_string = way:get_value_by_key("lanes")
if lanes_string and tonumber(lanes_string:match("%d*")) then
lanes = tonumber(lanes_string:match("%d*"))
end
end
-- scale speeds to get better avg driving times -- scale speeds to get better avg driving times
if result.forward_speed > 0 then if result.forward_speed > 0 then
local scaled_speed = result.forward_speed*speed_reduction + 11; local scaled_speed = result.forward_speed*speed_reduction + 11;
local penalized_speed = math.huge local penalized_speed = math.huge
if width <= 3 then if width <= 3 or lanes <= 1 then
penalized_speed = result.forward_speed / 2; penalized_speed = result.forward_speed / 2;
end end
result.forward_speed = math.min(penalized_speed, scaled_speed) result.forward_speed = math.min(penalized_speed, scaled_speed)
@ -421,16 +425,10 @@ function way_function (way, result)
if result.backward_speed > 0 then if result.backward_speed > 0 then
local scaled_speed = result.backward_speed*speed_reduction + 11; local scaled_speed = result.backward_speed*speed_reduction + 11;
local penalized_speed = math.huge local penalized_speed = math.huge
if width <= 3 then if width <= 3 or lanes <= 1 then
penalized_speed = result.backward_speed / 2; penalized_speed = result.backward_speed / 2;
end end
result.backward_speed = math.min(penalized_speed, scaled_speed) result.backward_speed = math.min(penalized_speed, scaled_speed)
end end
end end
-- These are wrappers to parse vectors of nodes and ways and thus to speed up any tracing JIT
function node_vector_function(vector)
for v in vector.nodes do
node_function(v)
end
end

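The hunk above only reads width and lanes once a positive speed is known, and halves the speed on ways that are at most 3 m wide or have a single lane before taking the minimum with the usual scaling. A tiny numeric illustration in C++ rather than Lua (speed_reduction = 0.8 is an assumed value here; it is defined elsewhere in the profile and not part of this diff):

#include <algorithm>
#include <cassert>
#include <cmath>
#include <limits>

// Sketch of the speed adjustment above: generic scaling plus a penalty for
// narrow or single-lane ways.
double adjustSpeed(double speed, double width, double lanes, double speed_reduction)
{
    const double scaled_speed = speed * speed_reduction + 11.0;
    double penalized_speed = std::numeric_limits<double>::infinity();
    if (width <= 3.0 || lanes <= 1.0)
    {
        penalized_speed = speed / 2.0;
    }
    return std::min(penalized_speed, scaled_speed);
}

int main()
{
    // wide, two-lane road: only the generic scaling applies (50 * 0.8 + 11 = 51)
    assert(std::abs(adjustSpeed(50.0, 10.0, 2.0, 0.8) - 51.0) < 1e-9);
    // narrow road: the halved speed (25) wins over the scaled speed (51)
    assert(std::abs(adjustSpeed(50.0, 2.5, 2.0, 0.8) - 25.0) < 1e-9);
    return 0;
}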
View File

@ -1,10 +1,11 @@
-- Foot profile -- Foot profile
require("lib/access") local find_access_tag = require("lib/access").find_access_tag
-- Begin of globals
barrier_whitelist = { [""] = true, ["cycle_barrier"] = true, ["bollard"] = true, ["entrance"] = true, ["cattle_grid"] = true, ["border_control"] = true, ["toll_booth"] = true, ["sally_port"] = true, ["gate"] = true, ["no"] = true} barrier_whitelist = { [""] = true, ["cycle_barrier"] = true, ["bollard"] = true, ["entrance"] = true, ["cattle_grid"] = true, ["border_control"] = true, ["toll_booth"] = true, ["sally_port"] = true, ["gate"] = true, ["no"] = true}
access_tag_whitelist = { ["yes"] = true, ["foot"] = true, ["permissive"] = true, ["designated"] = true } access_tag_whitelist = { ["yes"] = true, ["foot"] = true, ["permissive"] = true, ["designated"] = true }
access_tag_blacklist = { ["no"] = true, ["private"] = true, ["agricultural"] = true, ["forestery"] = true } access_tag_blacklist = { ["no"] = true, ["private"] = true, ["agricultural"] = true, ["forestry"] = true }
access_tag_restricted = { ["destination"] = true, ["delivery"] = true } access_tag_restricted = { ["destination"] = true, ["delivery"] = true }
access_tags_hierachy = { "foot", "access" } access_tags_hierachy = { "foot", "access" }
service_tag_restricted = { ["parking_aisle"] = true } service_tag_restricted = { ["parking_aisle"] = true }
@ -35,32 +36,36 @@ speeds = {
} }
route_speeds = { route_speeds = {
["ferry"] = 5 ["ferry"] = 5
} }
platform_speeds = { platform_speeds = {
["platform"] = walking_speed ["platform"] = walking_speed
} }
amenity_speeds = { amenity_speeds = {
["parking"] = walking_speed, ["parking"] = walking_speed,
["parking_entrance"] = walking_speed ["parking_entrance"] = walking_speed
} }
man_made_speeds = { man_made_speeds = {
["pier"] = walking_speed ["pier"] = walking_speed
} }
surface_speeds = { surface_speeds = {
["fine_gravel"] = walking_speed*0.75, ["fine_gravel"] = walking_speed*0.75,
["gravel"] = walking_speed*0.75, ["gravel"] = walking_speed*0.75,
["pebbelstone"] = walking_speed*0.75, ["pebblestone"] = walking_speed*0.75,
["mud"] = walking_speed*0.5, ["mud"] = walking_speed*0.5,
["sand"] = walking_speed*0.5 ["sand"] = walking_speed*0.5
} }
traffic_signal_penalty = 2 leisure_speeds = {
u_turn_penalty = 2 ["track"] = walking_speed
}
traffic_signal_penalty = 2
u_turn_penalty = 2
use_turn_restrictions = false use_turn_restrictions = false
--modes --modes
@ -68,144 +73,150 @@ local mode_normal = 1
local mode_ferry = 2 local mode_ferry = 2
function get_exceptions(vector) function get_exceptions(vector)
for i,v in ipairs(restriction_exception_tags) do for i,v in ipairs(restriction_exception_tags) do
vector:Add(v) vector:Add(v)
end end
end end
function node_function (node, result) function node_function (node, result)
local barrier = node:get_value_by_key("barrier") local barrier = node:get_value_by_key("barrier")
local access = Access.find_access_tag(node, access_tags_hierachy) local access = find_access_tag(node, access_tags_hierachy)
local traffic_signal = node:get_value_by_key("highway") local traffic_signal = node:get_value_by_key("highway")
-- flag node if it carries a traffic light -- flag node if it carries a traffic light
if traffic_signal and traffic_signal == "traffic_signals" then if traffic_signal and traffic_signal == "traffic_signals" then
result.traffic_light = true result.traffic_light = true
end end
-- parse access and barrier tags -- parse access and barrier tags
if access and access ~= "" then if access and access ~= "" then
if access_tag_blacklist[access] then if access_tag_blacklist[access] then
result.barrier = true result.barrier = true
else else
result.barrier = false result.barrier = false
end end
elseif barrier and barrier ~= "" then elseif barrier and barrier ~= "" then
if barrier_whitelist[barrier] then if barrier_whitelist[barrier] then
result.barrier = false result.barrier = false
else else
result.barrier = true result.barrier = true
end end
end end
return 1 return 1
end end
function way_function (way, result) function way_function (way, result)
-- initial routability check, filters out buildings, boundaries, etc -- initial routability check, filters out buildings, boundaries, etc
local highway = way:get_value_by_key("highway") local highway = way:get_value_by_key("highway")
local route = way:get_value_by_key("route") local leisure = way:get_value_by_key("leisure")
local man_made = way:get_value_by_key("man_made") local route = way:get_value_by_key("route")
local railway = way:get_value_by_key("railway") local man_made = way:get_value_by_key("man_made")
local amenity = way:get_value_by_key("amenity") local railway = way:get_value_by_key("railway")
local public_transport = way:get_value_by_key("public_transport") local amenity = way:get_value_by_key("amenity")
if (not highway or highway == '') and local public_transport = way:get_value_by_key("public_transport")
(not route or route == '') and if (not highway or highway == '') and
(not railway or railway=='') and (not leisure or leisure == '') and
(not amenity or amenity=='') and (not route or route == '') and
(not man_made or man_made=='') and (not railway or railway=='') and
(not public_transport or public_transport=='') (not amenity or amenity=='') and
then (not man_made or man_made=='') and
return (not public_transport or public_transport=='')
end then
return
end
-- don't route on ways that are still under construction -- don't route on ways that are still under construction
if highway=='construction' then if highway=='construction' then
return return
end end
-- access -- access
local access = Access.find_access_tag(way, access_tags_hierachy) local access = find_access_tag(way, access_tags_hierachy)
if access_tag_blacklist[access] then if access_tag_blacklist[access] then
return return
end end
local name = way:get_value_by_key("name") local name = way:get_value_by_key("name")
local ref = way:get_value_by_key("ref") local ref = way:get_value_by_key("ref")
local junction = way:get_value_by_key("junction") local junction = way:get_value_by_key("junction")
local onewayClass = way:get_value_by_key("oneway:foot") local onewayClass = way:get_value_by_key("oneway:foot")
local duration = way:get_value_by_key("duration") local duration = way:get_value_by_key("duration")
local service = way:get_value_by_key("service") local service = way:get_value_by_key("service")
local area = way:get_value_by_key("area") local area = way:get_value_by_key("area")
local foot = way:get_value_by_key("foot") local foot = way:get_value_by_key("foot")
local surface = way:get_value_by_key("surface") local surface = way:get_value_by_key("surface")
-- name -- name
if ref and "" ~= ref and name and "" ~= name then if ref and "" ~= ref and name and "" ~= name then
result.name = name .. ' / ' .. ref result.name = name .. ' / ' .. ref
elseif ref and "" ~= ref then elseif ref and "" ~= ref then
result.name = ref result.name = ref
elseif name and "" ~= name then elseif name and "" ~= name then
result.name = name result.name = name
elseif highway then elseif highway then
result.name = "{highway:"..highway.."}" -- if no name exists, use way type result.name = "{highway:"..highway.."}" -- if no name exists, use way type
-- this encoding scheme is expected to be a temporary solution -- this encoding scheme is expected to be a temporary solution
end end
-- roundabouts -- roundabouts
if "roundabout" == junction then if "roundabout" == junction then
result.roundabout = true; result.roundabout = true;
end end
-- speed -- speed
if route_speeds[route] then if route_speeds[route] then
-- ferries (doesn't cover routes tagged using relations) -- ferries (doesn't cover routes tagged using relations)
result.ignore_in_grid = true result.ignore_in_grid = true
if duration and durationIsValid(duration) then if duration and durationIsValid(duration) then
result.duration = math.max( 1, parseDuration(duration) ) result.duration = math.max( 1, parseDuration(duration) )
else else
result.forward_speed = route_speeds[route] result.forward_speed = route_speeds[route]
result.backward_speed = route_speeds[route] result.backward_speed = route_speeds[route]
end end
result.forward_mode = mode_ferry result.forward_mode = mode_ferry
result.backward_mode = mode_ferry result.backward_mode = mode_ferry
elseif railway and platform_speeds[railway] then elseif railway and platform_speeds[railway] then
-- railway platforms (old tagging scheme) -- railway platforms (old tagging scheme)
result.forward_speed = platform_speeds[railway] result.forward_speed = platform_speeds[railway]
result.backward_speed = platform_speeds[railway] result.backward_speed = platform_speeds[railway]
elseif platform_speeds[public_transport] then elseif platform_speeds[public_transport] then
-- public_transport platforms (new tagging platform) -- public_transport platforms (new tagging platform)
result.forward_speed = platform_speeds[public_transport] result.forward_speed = platform_speeds[public_transport]
result.backward_speed = platform_speeds[public_transport] result.backward_speed = platform_speeds[public_transport]
elseif amenity and amenity_speeds[amenity] then elseif amenity and amenity_speeds[amenity] then
-- parking areas -- parking areas
result.forward_speed = amenity_speeds[amenity] result.forward_speed = amenity_speeds[amenity]
result.backward_speed = amenity_speeds[amenity] result.backward_speed = amenity_speeds[amenity]
elseif speeds[highway] then elseif leisure and leisure_speeds[leisure] then
-- regular ways -- running tracks
result.forward_speed = speeds[highway] result.forward_speed = leisure_speeds[leisure]
result.backward_speed = speeds[highway] result.backward_speed = leisure_speeds[leisure]
elseif access and access_tag_whitelist[access] then elseif speeds[highway] then
-- unknown way, but valid access tag -- regular ways
result.forward_speed = walking_speed result.forward_speed = speeds[highway]
result.backward_speed = walking_speed result.backward_speed = speeds[highway]
end elseif access and access_tag_whitelist[access] then
-- unknown way, but valid access tag
result.forward_speed = walking_speed
result.backward_speed = walking_speed
end
-- oneway -- oneway
if onewayClass == "yes" or onewayClass == "1" or onewayClass == "true" then if onewayClass == "yes" or onewayClass == "1" or onewayClass == "true" then
result.backward_mode = 0 result.backward_mode = 0
elseif onewayClass == "no" or onewayClass == "0" or onewayClass == "false" then elseif onewayClass == "no" or onewayClass == "0" or onewayClass == "false" then
-- nothing to do -- nothing to do
elseif onewayClass == "-1" then elseif onewayClass == "-1" then
result.forward_mode = 0 result.forward_mode = 0
end end
-- surfaces -- surfaces
if surface then if surface then
surface_speed = surface_speeds[surface] surface_speed = surface_speeds[surface]
if surface_speed then if surface_speed then
result.forward_speed = math.min(result.forward_speed, surface_speed) result.forward_speed = math.min(result.forward_speed, surface_speed)
result.backward_speed = math.min(result.backward_speed, surface_speed) result.backward_speed = math.min(result.backward_speed, surface_speed)
end
end end
end
end end

View File

@ -1,13 +1,15 @@
local ipairs = ipairs local ipairs = ipairs
module "Access" local Access = {}
function find_access_tag(source,access_tags_hierachy) function Access.find_access_tag(source,access_tags_hierachy)
for i,v in ipairs(access_tags_hierachy) do for i,v in ipairs(access_tags_hierachy) do
local tag = source:get_value_by_key(v) local tag = source:get_value_by_key(v)
if tag and tag ~= '' then if tag and tag ~= '' then
return tag return tag
end end
end end
return nil return ""
end end
return Access

View File

@ -1,8 +1,8 @@
local math = math local math = math
module "MaxSpeed" local MaxSpeed = {}
function limit(way,max,maxf,maxb) function MaxSpeed.limit(way,max,maxf,maxb)
if maxf and maxf>0 then if maxf and maxf>0 then
way.forward_speed = math.min(way.forward_speed, maxf) way.forward_speed = math.min(way.forward_speed, maxf)
elseif max and max>0 then elseif max and max>0 then
@ -15,3 +15,5 @@ function limit(way,max,maxf,maxb)
way.backward_speed = math.min(way.backward_speed, max) way.backward_speed = math.min(way.backward_speed, max)
end end
end end
return MaxSpeed

View File

@ -103,8 +103,8 @@ class AlternativeRouting final
int upper_bound_to_shortest_path_distance = INVALID_EDGE_WEIGHT; int upper_bound_to_shortest_path_distance = INVALID_EDGE_WEIGHT;
NodeID middle_node = SPECIAL_NODEID; NodeID middle_node = SPECIAL_NODEID;
const EdgeWeight min_edge_offset = const EdgeWeight min_edge_offset =
std::min(phantom_node_pair.source_phantom.GetForwardWeightPlusOffset(), std::min(-phantom_node_pair.source_phantom.GetForwardWeightPlusOffset(),
phantom_node_pair.source_phantom.GetReverseWeightPlusOffset()); -phantom_node_pair.source_phantom.GetReverseWeightPlusOffset());
if (phantom_node_pair.source_phantom.forward_node_id != SPECIAL_NODEID) if (phantom_node_pair.source_phantom.forward_node_id != SPECIAL_NODEID)
{ {

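The sign flip above changes the offset from std::min(w_f, w_r) to std::min(-w_f, -w_r), which equals -max(w_f, w_r): with the phantom node's forward and reverse weights w_f and w_r, that is the most negative key either search can start with, presumably so it can serve as a valid lower bound on the initial heap keys, whereas the old positive value could not. A tiny arithmetic check with made-up weights:

#include <algorithm>
#include <cassert>

int main()
{
    // hypothetical phantom-node weights
    const int forward_weight = 120;
    const int reverse_weight = 80;

    const int old_offset = std::min(forward_weight, reverse_weight);   // 80, positive
    const int new_offset = std::min(-forward_weight, -reverse_weight); // -120

    assert(old_offset == 80);
    assert(new_offset == -120);
    // the fixed offset is exactly minus the larger of the two weights
    assert(new_offset == -std::max(forward_weight, reverse_weight));
    return 0;
}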
View File

@ -93,8 +93,8 @@ class ShortestPathRouting final
const bool allow_u_turn = current_leg > 0 && uturn_indicators.size() > current_leg && const bool allow_u_turn = current_leg > 0 && uturn_indicators.size() > current_leg &&
uturn_indicators[current_leg - 1]; uturn_indicators[current_leg - 1];
const EdgeWeight min_edge_offset = const EdgeWeight min_edge_offset =
std::min(phantom_node_pair.source_phantom.GetForwardWeightPlusOffset(), std::min(-phantom_node_pair.source_phantom.GetForwardWeightPlusOffset(),
phantom_node_pair.source_phantom.GetReverseWeightPlusOffset()); -phantom_node_pair.source_phantom.GetReverseWeightPlusOffset());
// insert new starting nodes into forward heap, adjusted by previous distances. // insert new starting nodes into forward heap, adjusted by previous distances.
if ((allow_u_turn || search_from_1st_node) && if ((allow_u_turn || search_from_1st_node) &&
@ -272,11 +272,13 @@ class ShortestPathRouting final
if (start_id_of_leg1 != last_id_of_packed_legs1) if (start_id_of_leg1 != last_id_of_packed_legs1)
{ {
packed_legs1 = packed_legs2; packed_legs1 = packed_legs2;
distance1 = distance2;
BOOST_ASSERT(start_id_of_leg1 == temporary_packed_leg1.front()); BOOST_ASSERT(start_id_of_leg1 == temporary_packed_leg1.front());
} }
else if (start_id_of_leg2 != last_id_of_packed_legs2) else if (start_id_of_leg2 != last_id_of_packed_legs2)
{ {
packed_legs2 = packed_legs1; packed_legs2 = packed_legs1;
distance2 = distance1;
BOOST_ASSERT(start_id_of_leg2 == temporary_packed_leg2.front()); BOOST_ASSERT(start_id_of_leg2 == temporary_packed_leg2.front());
} }
} }

View File

@ -42,7 +42,7 @@ template <typename Iterator, class HandlerT> struct APIGrammar : qi::grammar<Ite
*(query) >> -(uturns); *(query) >> -(uturns);
query = ('?') >> (+(zoom | output | jsonp | checksum | location | hint | timestamp | u | cmp | query = ('?') >> (+(zoom | output | jsonp | checksum | location | hint | timestamp | u | cmp |
language | instruction | geometry | alt_route | old_API | num_results | language | instruction | geometry | alt_route | old_API | num_results |
matching_beta | gps_precision | classify)); matching_beta | gps_precision | classify | locs));
zoom = (-qi::lit('&')) >> qi::lit('z') >> '=' >> zoom = (-qi::lit('&')) >> qi::lit('z') >> '=' >>
qi::short_[boost::bind(&HandlerT::setZoomLevel, handler, ::_1)]; qi::short_[boost::bind(&HandlerT::setZoomLevel, handler, ::_1)];
@ -83,17 +83,20 @@ template <typename Iterator, class HandlerT> struct APIGrammar : qi::grammar<Ite
qi::short_[boost::bind(&HandlerT::setGPSPrecision, handler, ::_1)]; qi::short_[boost::bind(&HandlerT::setGPSPrecision, handler, ::_1)];
classify = (-qi::lit('&')) >> qi::lit("classify") >> '=' >> classify = (-qi::lit('&')) >> qi::lit("classify") >> '=' >>
qi::bool_[boost::bind(&HandlerT::setClassify, handler, ::_1)]; qi::bool_[boost::bind(&HandlerT::setClassify, handler, ::_1)];
locs = (-qi::lit('&')) >> qi::lit("locs") >> '=' >>
stringforPolyline[boost::bind(&HandlerT::getCoordinatesFromGeometry, handler, ::_1)];
string = +(qi::char_("a-zA-Z")); string = +(qi::char_("a-zA-Z"));
stringwithDot = +(qi::char_("a-zA-Z0-9_.-")); stringwithDot = +(qi::char_("a-zA-Z0-9_.-"));
stringwithPercent = +(qi::char_("a-zA-Z0-9_.-") | qi::char_('[') | qi::char_(']') | stringwithPercent = +(qi::char_("a-zA-Z0-9_.-") | qi::char_('[') | qi::char_(']') |
(qi::char_('%') >> qi::char_("0-9A-Z") >> qi::char_("0-9A-Z"))); (qi::char_('%') >> qi::char_("0-9A-Z") >> qi::char_("0-9A-Z")));
stringforPolyline = +(qi::char_("a-zA-Z0-9_.-[]{}@?|\\%~`^"));
} }
qi::rule<Iterator> api_call, query; qi::rule<Iterator> api_call, query;
qi::rule<Iterator, std::string()> service, zoom, output, string, jsonp, checksum, location, qi::rule<Iterator, std::string()> service, zoom, output, string, jsonp, checksum, location,
hint, timestamp, stringwithDot, stringwithPercent, language, instruction, geometry, cmp, alt_route, u, hint, timestamp, stringwithDot, stringwithPercent, language, instruction, geometry, cmp, alt_route, u,
uturns, old_API, num_results, matching_beta, gps_precision, classify; uturns, old_API, num_results, matching_beta, gps_precision, classify, locs, stringforPolyline;
HandlerT *handler; HandlerT *handler;
}; };

View File

@ -42,7 +42,8 @@ namespace http
RequestParser::RequestParser() RequestParser::RequestParser()
: state(internal_state::method_start), current_header({"", ""}), : state(internal_state::method_start), current_header({"", ""}),
selected_compression(no_compression) selected_compression(no_compression), is_post_header(false),
content_length(0)
{ {
} }
@ -58,6 +59,11 @@ RequestParser::parse(request &current_request, char *begin, char *end)
} }
} }
osrm::tribool result = osrm::tribool::indeterminate; osrm::tribool result = osrm::tribool::indeterminate;
if(state == internal_state::post_request && content_length <= 0)
{
result = osrm::tribool::yes;
}
return std::make_tuple(result, selected_compression); return std::make_tuple(result, selected_compression);
} }
@ -70,8 +76,39 @@ osrm::tribool RequestParser::consume(request &current_request, const char input)
{ {
return osrm::tribool::no; return osrm::tribool::no;
} }
if(input == 'P')
{
state = internal_state::post_O;
return osrm::tribool::indeterminate;
}
state = internal_state::method; state = internal_state::method;
return osrm::tribool::indeterminate; return osrm::tribool::indeterminate;
case internal_state::post_O:
if(input == 'O')
{
state = internal_state::post_S;
return osrm::tribool::indeterminate;
}
return osrm::tribool::no;
case internal_state::post_S:
if(input == 'S')
{
state = internal_state::post_T;
return osrm::tribool::indeterminate;
}
return osrm::tribool::no;
case internal_state::post_T:
if(input == 'T')
{
is_post_header = true;
state = internal_state::method;
return osrm::tribool::indeterminate;
}
return osrm::tribool::no;
case internal_state::post_request:
current_request.uri.push_back(input);
--content_length;
return osrm::tribool::indeterminate;
case internal_state::method: case internal_state::method:
if (input == ' ') if (input == ' ')
{ {
@ -204,6 +241,24 @@ osrm::tribool RequestParser::consume(request &current_request, const char input)
{ {
current_request.agent = current_header.value; current_request.agent = current_header.value;
} }
if (boost::iequals(current_header.name, "Content-Length"))
{
try
{
content_length = std::stoi(current_header.value);
}
catch (const std::exception &e)
{
// Ignore the header if the parameter isn't an int
}
}
if (boost::iequals(current_header.name, "Content-Type"))
{
if (!boost::icontains(current_header.value, "application/x-www-form-urlencoded"))
{
return osrm::tribool::no;
}
}
if (input == '\r') if (input == '\r')
{ {
@ -272,7 +327,19 @@ osrm::tribool RequestParser::consume(request &current_request, const char input)
return osrm::tribool::indeterminate; return osrm::tribool::indeterminate;
} }
return osrm::tribool::no; return osrm::tribool::no;
default: // expecting_newline_3 case internal_state::expecting_newline_3:
if(input == '\n')
{
if(is_post_header)
{
current_request.uri.push_back('?');
state = internal_state::post_request;
return osrm::tribool::indeterminate;
}
return osrm::tribool::yes;
}
return osrm::tribool::no;
default: // should never be reached
return input == '\n' ? osrm::tribool::yes : osrm::tribool::no; return input == '\n' ? osrm::tribool::yes : osrm::tribool::no;
} }
} }

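Taken together, the parser changes above detect the POST method character by character, remember Content-Length, require an application/x-www-form-urlencoded body, and after the final header newline append '?' plus the body bytes to the URI, so a POST body flows through the same query-string handling as a GET. A minimal standalone sketch of that folding step (not the RequestParser class itself; the function name is illustrative):

#include <cassert>
#include <string>

// Sketch only: append a form-encoded POST body to the request URI so the
// existing query-string parser can handle it, honouring Content-Length.
std::string foldPostBodyIntoUri(const std::string &uri,
                                const std::string &body,
                                std::size_t content_length)
{
    std::string result = uri;
    result.push_back('?');
    result.append(body, 0, content_length); // copy at most content_length bytes
    return result;
}

int main()
{
    const std::string body = "loc=55.68740,12.52430&loc=55.68711,12.52383";
    assert(foldPostBodyIntoUri("/viaroute", body, body.size()) ==
           "/viaroute?loc=55.68740,12.52430&loc=55.68711,12.52383");
    return 0;
}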
View File

@ -80,11 +80,17 @@ class RequestParser
space_before_header_value, space_before_header_value,
header_value, header_value,
expecting_newline_2, expecting_newline_2,
expecting_newline_3 expecting_newline_3,
post_O,
post_S,
post_T,
post_request
} state; } state;
header current_header; header current_header;
compression_type selected_compression; compression_type selected_compression;
bool is_post_header;
int content_length;
}; };
} // namespace http } // namespace http

View File

@ -76,12 +76,68 @@ void DeleteFileIfExists(const std::string &file_name)
} }
} }
void LoadRestrictions(const char* path, std::vector<TurnRestriction>& restriction_list)
{
std::ifstream input_stream(path, std::ios::binary);
if (!input_stream.is_open())
{
throw osrm::exception("Cannot open restriction file");
}
loadRestrictionsFromFile(input_stream, restriction_list);
}
std::size_t LoadGraph(const char* path,
std::vector<QueryNode>& coordinate_list,
std::vector<NodeID>& barrier_node_list,
std::vector<TarjanEdge>& graph_edge_list)
{
std::ifstream input_stream(path, std::ifstream::in | std::ifstream::binary);
if (!input_stream.is_open())
{
throw osrm::exception("Cannot open osrm file");
}
// load graph data
std::vector<NodeBasedEdge> edge_list;
std::vector<NodeID> traffic_light_node_list;
auto number_of_nodes = loadNodesFromFile(input_stream, barrier_node_list,
traffic_light_node_list,
coordinate_list);
loadEdgesFromFile(input_stream, edge_list);
traffic_light_node_list.clear();
traffic_light_node_list.shrink_to_fit();
// Building a node-based graph
for (const auto &input_edge : edge_list)
{
if (input_edge.source == input_edge.target)
{
continue;
}
if (input_edge.forward)
{
graph_edge_list.emplace_back(input_edge.source, input_edge.target,
(std::max)(input_edge.weight, 1), input_edge.name_id);
}
if (input_edge.backward)
{
graph_edge_list.emplace_back(input_edge.target, input_edge.source,
(std::max)(input_edge.weight, 1), input_edge.name_id);
}
}
return number_of_nodes;
}
int main(int argc, char *argv[]) int main(int argc, char *argv[])
{ {
std::vector<QueryNode> coordinate_list; std::vector<QueryNode> coordinate_list;
std::vector<TurnRestriction> restriction_list; std::vector<TurnRestriction> restriction_list;
std::vector<NodeID> bollard_node_list; std::vector<NodeID> barrier_node_list;
std::vector<NodeID> traffic_lights_list;
LogPolicy::GetInstance().Unmute(); LogPolicy::GetInstance().Unmute();
try try
@ -95,83 +151,10 @@ int main(int argc, char *argv[])
} }
SimpleLogger().Write() << "Using restrictions from file: " << argv[2]; SimpleLogger().Write() << "Using restrictions from file: " << argv[2];
std::ifstream restriction_ifstream(argv[2], std::ios::binary);
const FingerPrint fingerprint_orig;
FingerPrint fingerprint_loaded;
restriction_ifstream.read(reinterpret_cast<char *>(&fingerprint_loaded),
sizeof(FingerPrint));
// check fingerprint and warn if necessary
if (!fingerprint_loaded.TestGraphUtil(fingerprint_orig))
{
SimpleLogger().Write(logWARNING) << argv[2] << " was prepared with a different build. "
"Reprocess to get rid of this warning.";
}
if (!restriction_ifstream.good())
{
throw osrm::exception("Could not access <osrm-restrictions> files");
}
uint32_t usable_restrictions = 0;
restriction_ifstream.read(reinterpret_cast<char *>(&usable_restrictions), sizeof(uint32_t));
restriction_list.resize(usable_restrictions);
// load restrictions
if (usable_restrictions > 0)
{
restriction_ifstream.read(reinterpret_cast<char *>(&restriction_list[0]),
usable_restrictions * sizeof(TurnRestriction));
}
restriction_ifstream.close();
std::ifstream input_stream(argv[1], std::ifstream::in | std::ifstream::binary);
if (!input_stream.is_open())
{
throw osrm::exception("Cannot open osrm file");
}
// load graph data
std::vector<ImportEdge> edge_list;
const NodeID number_of_nodes =
readBinaryOSRMGraphFromStream(input_stream, edge_list, bollard_node_list,
traffic_lights_list, &coordinate_list, restriction_list);
input_stream.close();
BOOST_ASSERT_MSG(restriction_list.size() == usable_restrictions,
"size of restriction_list changed");
SimpleLogger().Write() << restriction_list.size() << " restrictions, "
<< bollard_node_list.size() << " bollard nodes, "
<< traffic_lights_list.size() << " traffic lights";
traffic_lights_list.clear();
traffic_lights_list.shrink_to_fit();
// Building an node-based graph
std::vector<TarjanEdge> graph_edge_list; std::vector<TarjanEdge> graph_edge_list;
// DeallocatingVector<TarjanEdge> graph_edge_list; auto number_of_nodes = LoadGraph(argv[1], coordinate_list, barrier_node_list, graph_edge_list);
for (const auto &input_edge : edge_list) LoadRestrictions(argv[2], restriction_list);
{
if (input_edge.source == input_edge.target)
{
continue;
}
if (input_edge.forward)
{
graph_edge_list.emplace_back(input_edge.source, input_edge.target,
(std::max)(input_edge.weight, 1), input_edge.name_id);
}
if (input_edge.backward)
{
graph_edge_list.emplace_back(input_edge.target, input_edge.source,
(std::max)(input_edge.weight, 1), input_edge.name_id);
}
}
edge_list.clear();
edge_list.shrink_to_fit();
BOOST_ASSERT_MSG(0 == edge_list.size() && 0 == edge_list.capacity(),
"input edge vector not properly deallocated");
tbb::parallel_sort(graph_edge_list.begin(), graph_edge_list.end()); tbb::parallel_sort(graph_edge_list.begin(), graph_edge_list.end());
const auto graph = std::make_shared<TarjanGraph>(number_of_nodes, graph_edge_list); const auto graph = std::make_shared<TarjanGraph>(number_of_nodes, graph_edge_list);
@ -181,9 +164,8 @@ int main(int argc, char *argv[])
SimpleLogger().Write() << "Starting SCC graph traversal"; SimpleLogger().Write() << "Starting SCC graph traversal";
RestrictionMap restriction_map(restriction_list); RestrictionMap restriction_map(restriction_list);
auto tarjan = osrm::make_unique<TarjanSCC<TarjanGraph>>(graph, auto tarjan = osrm::make_unique<TarjanSCC<TarjanGraph>>(graph, restriction_map,
restriction_map, barrier_node_list);
bollard_node_list);
tarjan->run(); tarjan->run();
SimpleLogger().Write() << "identified: " << tarjan->get_number_of_components() SimpleLogger().Write() << "identified: " << tarjan->get_number_of_components()
<< " many components"; << " many components";

View File

@ -1,199 +0,0 @@
#include "../data_structures/dynamic_graph.hpp"
#include "../data_structures/import_edge.hpp"
#include "../data_structures/query_node.hpp"
#include "../data_structures/restriction.hpp"
#include "../data_structures/static_graph.hpp"
#include "../util/fingerprint.hpp"
#include "../util/graph_loader.hpp"
#include "../util/integer_range.hpp"
#include "../util/make_unique.hpp"
#include "../util/osrm_exception.hpp"
#include "../util/simple_logger.hpp"
#include "../typedefs.h"
#include <algorithm>
#include <fstream>
struct TarjanEdgeData
{
TarjanEdgeData() : distance(INVALID_EDGE_WEIGHT), name_id(INVALID_NAMEID) {}
TarjanEdgeData(unsigned distance, unsigned name_id) : distance(distance), name_id(name_id) {}
unsigned distance;
unsigned name_id;
};
using StaticTestGraph = StaticGraph<TarjanEdgeData>;
using DynamicTestGraph = StaticGraph<TarjanEdgeData>;
using StaticEdge = StaticTestGraph::InputEdge;
using DynamicEdge = DynamicTestGraph::InputEdge;
int main(int argc, char *argv[])
{
std::vector<QueryNode> coordinate_list;
std::vector<TurnRestriction> restriction_list;
std::vector<NodeID> bollard_node_list;
std::vector<NodeID> traffic_lights_list;
LogPolicy::GetInstance().Unmute();
try
{
// enable logging
if (argc < 3)
{
SimpleLogger().Write(logWARNING) << "usage:\n" << argv[0]
<< " <osrm> <osrm.restrictions>";
return -1;
}
SimpleLogger().Write() << "Using restrictions from file: " << argv[2];
std::ifstream restriction_ifstream(argv[2], std::ios::binary);
const FingerPrint fingerprint_orig;
FingerPrint fingerprint_loaded;
restriction_ifstream.read(reinterpret_cast<char *>(&fingerprint_loaded),
sizeof(FingerPrint));
// check fingerprint and warn if necessary
if (!fingerprint_loaded.TestGraphUtil(fingerprint_orig))
{
SimpleLogger().Write(logWARNING) << argv[2] << " was prepared with a different build. "
"Reprocess to get rid of this warning.";
}
if (!restriction_ifstream.good())
{
throw osrm::exception("Could not access <osrm-restrictions> files");
}
uint32_t usable_restrictions = 0;
restriction_ifstream.read(reinterpret_cast<char *>(&usable_restrictions), sizeof(uint32_t));
restriction_list.resize(usable_restrictions);
// load restrictions
if (usable_restrictions > 0)
{
restriction_ifstream.read(reinterpret_cast<char *>(&restriction_list[0]),
usable_restrictions * sizeof(TurnRestriction));
}
restriction_ifstream.close();
std::ifstream input_stream(argv[1], std::ifstream::in | std::ifstream::binary);
if (!input_stream.is_open())
{
throw osrm::exception("Cannot open osrm file");
}
// load graph data
std::vector<ImportEdge> edge_list;
const NodeID number_of_nodes =
readBinaryOSRMGraphFromStream(input_stream, edge_list, bollard_node_list,
traffic_lights_list, &coordinate_list, restriction_list);
input_stream.close();
BOOST_ASSERT_MSG(restriction_list.size() == usable_restrictions,
"size of restriction_list changed");
SimpleLogger().Write() << restriction_list.size() << " restrictions, "
<< bollard_node_list.size() << " bollard nodes, "
<< traffic_lights_list.size() << " traffic lights";
traffic_lights_list.clear();
traffic_lights_list.shrink_to_fit();
// Building an node-based graph
std::vector<StaticEdge> static_graph_edge_list;
std::vector<DynamicEdge> dynamic_graph_edge_list;
for (const auto &input_edge : edge_list)
{
if (input_edge.source == input_edge.target)
{
continue;
}
if (input_edge.forward)
{
static_graph_edge_list.emplace_back(input_edge.source, input_edge.target,
(std::max)(input_edge.weight, 1),
input_edge.name_id);
dynamic_graph_edge_list.emplace_back(input_edge.source, input_edge.target,
(std::max)(input_edge.weight, 1),
input_edge.name_id);
}
if (input_edge.backward)
{
dynamic_graph_edge_list.emplace_back(input_edge.target, input_edge.source,
(std::max)(input_edge.weight, 1),
input_edge.name_id);
static_graph_edge_list.emplace_back(input_edge.target, input_edge.source,
(std::max)(input_edge.weight, 1),
input_edge.name_id);
}
}
edge_list.clear();
edge_list.shrink_to_fit();
BOOST_ASSERT_MSG(0 == edge_list.size() && 0 == edge_list.capacity(),
"input edge vector not properly deallocated");
tbb::parallel_sort(static_graph_edge_list.begin(), static_graph_edge_list.end());
tbb::parallel_sort(dynamic_graph_edge_list.begin(), dynamic_graph_edge_list.end());
auto static_graph =
osrm::make_unique<StaticTestGraph>(number_of_nodes, static_graph_edge_list);
auto dynamic_graph =
osrm::make_unique<DynamicTestGraph>(number_of_nodes, dynamic_graph_edge_list);
SimpleLogger().Write() << "Starting static/dynamic graph comparison";
BOOST_ASSERT(static_graph->GetNumberOfNodes() == dynamic_graph->GetNumberOfNodes());
BOOST_ASSERT(static_graph->GetNumberOfEdges() == dynamic_graph->GetNumberOfEdges());
for (const auto node : osrm::irange(0u, static_graph->GetNumberOfNodes()))
{
const auto static_range = static_graph->GetAdjacentEdgeRange(node);
const auto dynamic_range = dynamic_graph->GetAdjacentEdgeRange(node);
SimpleLogger().Write() << "checking node " << node << "/"
<< static_graph->GetNumberOfNodes();
BOOST_ASSERT(static_range.size() == dynamic_range.size());
const auto static_begin = static_graph->BeginEdges(node);
const auto dynamic_begin = dynamic_graph->BeginEdges(node);
// check raw interface
for (const auto i : osrm::irange(0u, static_range.size()))
{
const auto static_target = static_graph->GetTarget(static_begin + i);
const auto dynamic_target = dynamic_graph->GetTarget(dynamic_begin + i);
BOOST_ASSERT(static_target == dynamic_target);
const auto static_data = static_graph->GetEdgeData(static_begin + i);
const auto dynamic_data = dynamic_graph->GetEdgeData(dynamic_begin + i);
BOOST_ASSERT(static_data.distance == dynamic_data.distance);
BOOST_ASSERT(static_data.name_id == dynamic_data.name_id);
}
// check range interface
std::vector<EdgeID> static_target_ids, dynamic_target_ids;
std::vector<TarjanEdgeData> static_edge_data, dynamic_edge_data;
for (const auto static_id : static_range)
{
static_target_ids.push_back(static_graph->GetTarget(static_id));
static_edge_data.push_back(static_graph->GetEdgeData(static_id));
}
for (const auto dynamic_id : dynamic_range)
{
dynamic_target_ids.push_back(dynamic_graph->GetTarget(dynamic_id));
dynamic_edge_data.push_back(dynamic_graph->GetEdgeData(dynamic_id));
}
BOOST_ASSERT(static_target_ids.size() == dynamic_target_ids.size());
BOOST_ASSERT(std::equal(std::begin(static_target_ids), std::end(static_target_ids),
std::begin(dynamic_target_ids)));
}
SimpleLogger().Write() << "Graph comparison finished successfully";
}
catch (const std::exception &e)
{
SimpleLogger().Write(logWARNING) << "[exception] " << e.what();
}
return 0;
}

View File

@ -41,24 +41,24 @@ BOOST_AUTO_TEST_CASE(all_necessary_test)
BOOST_AUTO_TEST_CASE(common_durations_get_translated) BOOST_AUTO_TEST_CASE(common_durations_get_translated)
{ {
BOOST_CHECK_EQUAL(parseDuration("00:01"), 600); BOOST_CHECK_EQUAL(parseDuration("00:01"), 60);
BOOST_CHECK_EQUAL(parseDuration("00:01:01"), 610); BOOST_CHECK_EQUAL(parseDuration("00:01:01"), 61);
BOOST_CHECK_EQUAL(parseDuration("01:01"), 36600); BOOST_CHECK_EQUAL(parseDuration("01:01"), 3660);
// check all combinations of iso duration tokens // check all combinations of iso duration tokens
BOOST_CHECK_EQUAL(parseDuration("PT1M1S"), 610); BOOST_CHECK_EQUAL(parseDuration("PT1M1S"), 61);
BOOST_CHECK_EQUAL(parseDuration("PT1H1S"), 36010); BOOST_CHECK_EQUAL(parseDuration("PT1H1S"), 3601);
BOOST_CHECK_EQUAL(parseDuration("PT15M"), 9000); BOOST_CHECK_EQUAL(parseDuration("PT15M"), 900);
BOOST_CHECK_EQUAL(parseDuration("PT15S"), 150); BOOST_CHECK_EQUAL(parseDuration("PT15S"), 15);
BOOST_CHECK_EQUAL(parseDuration("PT15H"), 540000); BOOST_CHECK_EQUAL(parseDuration("PT15H"), 54000);
BOOST_CHECK_EQUAL(parseDuration("PT1H15M"), 45000); BOOST_CHECK_EQUAL(parseDuration("PT1H15M"), 4500);
BOOST_CHECK_EQUAL(parseDuration("PT1H15M1S"), 45010); BOOST_CHECK_EQUAL(parseDuration("PT1H15M1S"), 4501);
} }
BOOST_AUTO_TEST_CASE(iso_8601_durations_case_insensitive) BOOST_AUTO_TEST_CASE(iso_8601_durations_case_insensitive)
{ {
BOOST_CHECK_EQUAL(parseDuration("PT15m"), 9000); BOOST_CHECK_EQUAL(parseDuration("PT15m"), 900);
BOOST_CHECK_EQUAL(parseDuration("PT1h15m"), 45000); BOOST_CHECK_EQUAL(parseDuration("PT1h15m"), 4500);
} }
BOOST_AUTO_TEST_SUITE_END() BOOST_AUTO_TEST_SUITE_END()


@ -0,0 +1,74 @@
/*
Copyright (c) 2015, Project OSRM contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <boost/test/floating_point_comparison.hpp>
#include <boost/test/unit_test.hpp>
#include "../../algorithms/polyline_compressor.hpp"
#include "../../data_structures/coordinate_calculation.hpp"
#include <osrm/coordinate.hpp>
#include <cmath>
#include <vector>
BOOST_AUTO_TEST_CASE(geometry_string)
{
// Polyline string for the 5 coordinates
const std::string polyline = "_gjaR_gjaR_pR_ibE_pR_ibE_pR_ibE_pR_ibE";
PolylineCompressor pc;
std::vector<FixedPointCoordinate> coords = pc.decode_string(polyline);
// Test coordinates; these would be the coordinates we give the loc parameter,
// e.g. loc=10.00,10.0&loc=10.01,10.1...
FixedPointCoordinate coord1(10.00 * COORDINATE_PRECISION, 10.0 * COORDINATE_PRECISION);
FixedPointCoordinate coord2(10.01 * COORDINATE_PRECISION, 10.1 * COORDINATE_PRECISION);
FixedPointCoordinate coord3(10.02 * COORDINATE_PRECISION, 10.2 * COORDINATE_PRECISION);
FixedPointCoordinate coord4(10.03 * COORDINATE_PRECISION, 10.3 * COORDINATE_PRECISION);
FixedPointCoordinate coord5(10.04 * COORDINATE_PRECISION, 10.4 * COORDINATE_PRECISION);
// Put the test coordinates into the vector for comparison
std::vector<FixedPointCoordinate> cmp_coords;
cmp_coords.emplace_back(coord1);
cmp_coords.emplace_back(coord2);
cmp_coords.emplace_back(coord3);
cmp_coords.emplace_back(coord4);
cmp_coords.emplace_back(coord5);
BOOST_CHECK_EQUAL(cmp_coords.size(), coords.size());
for(unsigned i = 0; i < cmp_coords.size(); ++i)
{
const double cmp1_lat = coords.at(i).lat;
const double cmp2_lat = cmp_coords.at(i).lat;
BOOST_CHECK_CLOSE(cmp1_lat, cmp2_lat, 0.0001);
const double cmp1_lon = coords.at(i).lon;
const double cmp2_lon = cmp_coords.at(i).lon;
BOOST_CHECK_CLOSE(cmp1_lon, cmp2_lon, 0.0001);
}
}
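For reference, the expected values follow the standard encoded-polyline scheme: each coordinate is stored as a zig-zag-encoded signed delta in 5-bit groups offset by ASCII 63, scaled here by a fixed-point COORDINATE_PRECISION of 1e6. A minimal decoder sketch under those assumptions; the project's actual implementation is PolylineCompressor::decode_string:

    #include <cstddef>
    #include <string>
    #include <utility>
    #include <vector>

    // Decodes an encoded polyline into fixed-point (lat, lon) pairs.
    std::vector<std::pair<int, int>> decode_polyline_sketch(const std::string &geometry)
    {
        auto next_delta = [&geometry](std::size_t &i)
        {
            int result = 0, shift = 0, byte = 0;
            do
            { // 5-bit groups, least significant first, offset by ASCII 63
                byte = geometry[i++] - 63;
                result |= (byte & 0x1f) << shift;
                shift += 5;
            } while (byte >= 0x20); // bit 0x20 marks a continuation chunk
            // undo the zig-zag encoding of the signed delta
            return (result & 1) ? ~(result >> 1) : (result >> 1);
        };

        std::vector<std::pair<int, int>> coordinates;
        int lat = 0, lon = 0;
        for (std::size_t i = 0; i < geometry.size();)
        {
            lat += next_delta(i);
            lon += next_delta(i);
            // first pair of "_gjaR_gjaR..." decodes to (10000000, 10000000), i.e. 10.0 / 10.0
            coordinates.emplace_back(lat, lon);
        }
        return coordinates;
    }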


@ -29,23 +29,22 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define FINGERPRINT_H #define FINGERPRINT_H
#include <boost/uuid/uuid.hpp> #include <boost/uuid/uuid.hpp>
#include <type_traits>
// implements a singleton, i.e. there is one and only one configuration object // implements a singleton, i.e. there is one and only one configuration object
class FingerPrint class FingerPrint
{ {
public: public:
FingerPrint(); static FingerPrint GetValid();
FingerPrint(const FingerPrint &) = delete;
~FingerPrint();
const boost::uuids::uuid &GetFingerPrint() const; const boost::uuids::uuid &GetFingerPrint() const;
bool IsMagicNumberOK() const; bool IsMagicNumberOK(const FingerPrint &other) const;
bool TestGraphUtil(const FingerPrint &other) const; bool TestGraphUtil(const FingerPrint &other) const;
bool TestPrepare(const FingerPrint &other) const; bool TestPrepare(const FingerPrint &other) const;
bool TestRTree(const FingerPrint &other) const; bool TestRTree(const FingerPrint &other) const;
bool TestQueryObjects(const FingerPrint &other) const; bool TestQueryObjects(const FingerPrint &other) const;
private: private:
const unsigned magic_number; unsigned magic_number;
char md5_prepare[33]; char md5_prepare[33];
char md5_tree[33]; char md5_tree[33];
char md5_graph[33]; char md5_graph[33];
@ -54,6 +53,10 @@ class FingerPrint
// initialize to {6ba7b810-9dad-11d1-80b4-00c04fd430c8} // initialize to {6ba7b810-9dad-11d1-80b4-00c04fd430c8}
boost::uuids::uuid named_uuid; boost::uuids::uuid named_uuid;
bool has_64_bits; bool has_64_bits;
}; };
static_assert(std::is_trivial<FingerPrint>::value, "FingerPrint needs to be trivial.");
#endif /* FingerPrint_H */ #endif /* FingerPrint_H */
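The static_assert and the switch from a constructor to a static GetValid() factory make FingerPrint a trivial type, so the loaders can fill it with a single raw read from the data files. A minimal sketch of that pattern with an illustrative Header type (not OSRM's actual struct):

    #include <fstream>
    #include <type_traits>

    struct Header
    {
        unsigned magic_number;
        char md5_prepare[33];
    };
    // A type with user-provided constructors/destructors would not be trivial and
    // could not safely be populated by the byte-wise read below.
    static_assert(std::is_trivial<Header>::value, "Header needs to be trivial.");

    bool read_header(std::ifstream &input, Header &header)
    {
        input.read(reinterpret_cast<char *>(&header), sizeof(Header));
        return static_cast<bool>(input);
    }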


@ -40,35 +40,38 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#cmakedefine MD5GRAPH "${MD5GRAPH}" #cmakedefine MD5GRAPH "${MD5GRAPH}"
#cmakedefine MD5OBJECTS "${MD5OBJECTS}" #cmakedefine MD5OBJECTS "${MD5OBJECTS}"
FingerPrint::FingerPrint() : magic_number(1297240911) FingerPrint FingerPrint::GetValid()
{ {
md5_prepare[32] = md5_tree[32] = md5_graph[32] = md5_objects[32] = '\0'; FingerPrint fingerprint;
boost::uuids::name_generator gen(named_uuid); fingerprint.magic_number = 1297240911;
fingerprint.md5_prepare[32] = fingerprint.md5_tree[32] = fingerprint.md5_graph[32] = fingerprint.md5_objects[32] = '\0';
boost::uuids::name_generator gen(fingerprint.named_uuid);
std::string temp_string; std::string temp_string;
std::memcpy(md5_prepare, MD5PREPARE, strlen(MD5PREPARE)); std::memcpy(fingerprint.md5_prepare, MD5PREPARE, 32);
temp_string += md5_prepare; temp_string += fingerprint.md5_prepare;
std::memcpy(md5_tree, MD5RTREE, 32); std::memcpy(fingerprint.md5_tree, MD5RTREE, 32);
temp_string += md5_tree; temp_string += fingerprint.md5_tree;
std::memcpy(md5_graph, MD5GRAPH, 32); std::memcpy(fingerprint.md5_graph, MD5GRAPH, 32);
temp_string += md5_graph; temp_string += fingerprint.md5_graph;
std::memcpy(md5_objects, MD5OBJECTS, 32); std::memcpy(fingerprint.md5_objects, MD5OBJECTS, 32);
temp_string += md5_objects; temp_string += fingerprint.md5_objects;
named_uuid = gen(temp_string); fingerprint.named_uuid = gen(temp_string);
has_64_bits = HAS64BITS; fingerprint.has_64_bits = HAS64BITS;
return fingerprint;
} }
FingerPrint::~FingerPrint() {}
const boost::uuids::uuid &FingerPrint::GetFingerPrint() const { return named_uuid; } const boost::uuids::uuid &FingerPrint::GetFingerPrint() const { return named_uuid; }
bool FingerPrint::IsMagicNumberOK() const { return 1297240911 == magic_number; } bool FingerPrint::IsMagicNumberOK(const FingerPrint& other) const { return other.magic_number == magic_number; }
bool FingerPrint::TestGraphUtil(const FingerPrint &other) const bool FingerPrint::TestGraphUtil(const FingerPrint &other) const
{ {
if (!other.IsMagicNumberOK()) if (!IsMagicNumberOK(other))
{ {
throw osrm::exception("hsgr input file misses magic number. Check or reprocess the file"); throw osrm::exception("hsgr input file misses magic number. Check or reprocess the file");
} }
@ -77,7 +80,7 @@ bool FingerPrint::TestGraphUtil(const FingerPrint &other) const
bool FingerPrint::TestPrepare(const FingerPrint &other) const bool FingerPrint::TestPrepare(const FingerPrint &other) const
{ {
if (!other.IsMagicNumberOK()) if (!IsMagicNumberOK(other))
{ {
throw osrm::exception("osrm input file misses magic number. Check or reprocess the file"); throw osrm::exception("osrm input file misses magic number. Check or reprocess the file");
} }
@ -86,7 +89,7 @@ bool FingerPrint::TestPrepare(const FingerPrint &other) const
bool FingerPrint::TestRTree(const FingerPrint &other) const bool FingerPrint::TestRTree(const FingerPrint &other) const
{ {
if (!other.IsMagicNumberOK()) if (!IsMagicNumberOK(other))
{ {
throw osrm::exception("r-tree input file misses magic number. Check or reprocess the file"); throw osrm::exception("r-tree input file misses magic number. Check or reprocess the file");
} }
@ -95,7 +98,7 @@ bool FingerPrint::TestRTree(const FingerPrint &other) const
bool FingerPrint::TestQueryObjects(const FingerPrint &other) const bool FingerPrint::TestQueryObjects(const FingerPrint &other) const
{ {
if (!other.IsMagicNumberOK()) if (!IsMagicNumberOK(other))
{ {
throw osrm::exception("missing magic number. Check or reprocess the file"); throw osrm::exception("missing magic number. Check or reprocess the file");
} }
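Since a FingerPrint read from disk is now plain bytes, the magic-number check compares the loaded value against the fingerprint produced by GetValid() rather than against a hard-coded literal inside the object itself. Caller-side, the pattern matches the loaders further down (sketch only, stream name illustrative):

    const FingerPrint fingerprint_valid = FingerPrint::GetValid();
    FingerPrint fingerprint_loaded;
    input_stream.read(reinterpret_cast<char *>(&fingerprint_loaded), sizeof(FingerPrint));
    if (!fingerprint_loaded.TestPrepare(fingerprint_valid))
    {
        SimpleLogger().Write(logWARNING) << "file was prepared with a different build";
    }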


@ -52,243 +52,57 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <unordered_map> #include <unordered_map>
#include <vector> #include <vector>
template <typename EdgeT> /**
NodeID read_undirected_osrm_stream(std::istream &input_stream, * Reads the .restrictions file and loads it to a vector.
std::vector<EdgeT> &edge_list, * Since the restrictions reference nodes using their external node id,
std::vector<FixedPointCoordinate> &coordinate_list) * we need to renumber them to the new internal ids.
*/
unsigned loadRestrictionsFromFile(std::istream& input_stream,
std::vector<TurnRestriction>& restriction_list)
{ {
const FingerPrint fingerprint_orig; const FingerPrint fingerprint_valid = FingerPrint::GetValid();
FingerPrint fingerprint_loaded; FingerPrint fingerprint_loaded;
input_stream.read(reinterpret_cast<char *>(&fingerprint_loaded), sizeof(FingerPrint)); unsigned number_of_usable_restrictions = 0;
input_stream.read((char *)&fingerprint_loaded, sizeof(FingerPrint));
if (!fingerprint_loaded.TestGraphUtil(fingerprint_orig)) if (!fingerprint_loaded.TestPrepare(fingerprint_valid))
{ {
SimpleLogger().Write(logWARNING) << ".osrm was prepared with different build.\n" SimpleLogger().Write(logWARNING) << ".restrictions was prepared with different build.\n"
"Reprocess to get rid of this warning."; "Reprocess to get rid of this warning.";
} }
std::unordered_map<NodeID, NodeID> ext_to_int_id_map; input_stream.read((char *)&number_of_usable_restrictions, sizeof(unsigned));
restriction_list.resize(number_of_usable_restrictions);
NodeID n; if (number_of_usable_restrictions > 0)
input_stream.read(reinterpret_cast<char *>(&n), sizeof(NodeID));
SimpleLogger().Write() << "Importing n = " << n << " nodes ";
ExternalMemoryNode current_node;
for (NodeID i = 0; i < n; ++i)
{ {
input_stream.read(reinterpret_cast<char *>(&current_node), sizeof(ExternalMemoryNode)); input_stream.read((char *) restriction_list.data(),
coordinate_list.emplace_back(current_node.lat, current_node.lon); number_of_usable_restrictions * sizeof(TurnRestriction));
ext_to_int_id_map.emplace(current_node.node_id, i);
// if (current_node.barrier)
// {
// barrier_node_list.emplace_back(i);
// }
// if (current_node.traffic_lights)
// {
// traffic_light_node_list.emplace_back(i);
// }
} }
// tighten vector sizes return number_of_usable_restrictions;
// barrier_node_list.shrink_to_fit();
// traffic_light_node_list.shrink_to_fit();
// renumber nodes in turn restrictions
// for (TurnRestriction &current_restriction : restriction_list)
// {
// auto internal_id_iter = ext_to_int_id_map.find(current_restriction.from.node);
// if (internal_id_iter == ext_to_int_id_map.end())
// {
// SimpleLogger().Write(logDEBUG) << "Unmapped from node " <<
// current_restriction.from.node
// << " of restriction";
// continue;
// }
// current_restriction.from.node = internal_id_iter->second;
// internal_id_iter = ext_to_int_id_map.find(current_restriction.via.node);
// if (internal_id_iter == ext_to_int_id_map.end())
// {
// SimpleLogger().Write(logDEBUG) << "Unmapped via node " <<
// current_restriction.via.node
// << " of restriction";
// continue;
// }
// current_restriction.via.node = internal_id_iter->second;
// internal_id_iter = ext_to_int_id_map.find(current_restriction.to.node);
// if (internal_id_iter == ext_to_int_id_map.end())
// {
// SimpleLogger().Write(logDEBUG) << "Unmapped to node " << current_restriction.to.node
// << " of restriction";
// continue;
// }
// current_restriction.to.node = internal_id_iter->second;
// }
EdgeWeight weight;
NodeID source, target;
unsigned nameID;
int length;
short dir; // direction (0 = open in both directions, 1 = forward only, 2 = backward only)
bool is_roundabout, ignore_in_grid, is_access_restricted, is_split;
TravelMode travel_mode;
EdgeID m;
input_stream.read(reinterpret_cast<char *>(&m), sizeof(unsigned));
edge_list.reserve(m);
SimpleLogger().Write() << " and " << m << " edges ";
for (EdgeID i = 0; i < m; ++i)
{
input_stream.read(reinterpret_cast<char *>(&source), sizeof(unsigned));
input_stream.read(reinterpret_cast<char *>(&target), sizeof(unsigned));
input_stream.read(reinterpret_cast<char *>(&length), sizeof(int));
input_stream.read(reinterpret_cast<char *>(&dir), sizeof(short));
input_stream.read(reinterpret_cast<char *>(&weight), sizeof(int));
input_stream.read(reinterpret_cast<char *>(&nameID), sizeof(unsigned));
input_stream.read(reinterpret_cast<char *>(&is_roundabout), sizeof(bool));
input_stream.read(reinterpret_cast<char *>(&ignore_in_grid), sizeof(bool));
input_stream.read(reinterpret_cast<char *>(&is_access_restricted), sizeof(bool));
input_stream.read(reinterpret_cast<char *>(&travel_mode), sizeof(TravelMode));
input_stream.read(reinterpret_cast<char *>(&is_split), sizeof(bool));
BOOST_ASSERT_MSG(length > 0, "loaded null length edge");
BOOST_ASSERT_MSG(weight > 0, "loaded null weight");
BOOST_ASSERT_MSG(0 <= dir && dir <= 2, "loaded bogus direction");
// bool forward = true;
// bool backward = true;
// if (1 == dir)
// {
// backward = false;
// }
// if (2 == dir)
// {
// forward = false;
// }
// translate the external NodeIDs to internal IDs
auto internal_id_iter = ext_to_int_id_map.find(source);
if (ext_to_int_id_map.find(source) == ext_to_int_id_map.end())
{
#ifndef NDEBUG
SimpleLogger().Write(logWARNING) << " unresolved source NodeID: " << source;
#endif
continue;
}
source = internal_id_iter->second;
internal_id_iter = ext_to_int_id_map.find(target);
if (ext_to_int_id_map.find(target) == ext_to_int_id_map.end())
{
#ifndef NDEBUG
SimpleLogger().Write(logWARNING) << "unresolved target NodeID : " << target;
#endif
continue;
}
target = internal_id_iter->second;
BOOST_ASSERT_MSG(source != SPECIAL_NODEID && target != SPECIAL_NODEID,
"nonexisting source or target");
if (source > target)
{
std::swap(source, target);
// std::swap(forward, backward);
}
edge_list.emplace_back(source, target);
}
ext_to_int_id_map.clear();
tbb::parallel_sort(edge_list.begin(), edge_list.end());
// for (unsigned i = 1; i < edge_list.size(); ++i)
// {
// if ((edge_list[i - 1].target == edge_list[i].target) &&
// (edge_list[i - 1].source == edge_list[i].source))
// {
// const bool edge_flags_equivalent = (edge_list[i - 1].forward == edge_list[i].forward)
// &&
// (edge_list[i - 1].backward ==
// edge_list[i].backward);
// const bool edge_flags_are_superset1 =
// (edge_list[i - 1].forward && edge_list[i - 1].backward) &&
// (edge_list[i].forward != edge_list[i].backward);
// const bool edge_flags_are_superset_2 =
// (edge_list[i].forward && edge_list[i].backward) &&
// (edge_list[i - 1].forward != edge_list[i - 1].backward);
// if (edge_flags_equivalent)
// {
// edge_list[i].weight = std::min(edge_list[i - 1].weight, edge_list[i].weight);
// edge_list[i - 1].source = SPECIAL_NODEID;
// }
// else if (edge_flags_are_superset1)
// {
// if (edge_list[i - 1].weight <= edge_list[i].weight)
// {
// // edge i-1 is smaller and goes in both directions. Throw away the other edge
// edge_list[i].source = SPECIAL_NODEID;
// }
// else
// {
// // edge i-1 is open in both directions, but edge i is smaller in one
// direction.
// // Close edge i-1 in this direction
// edge_list[i - 1].forward = !edge_list[i].forward;
// edge_list[i - 1].backward = !edge_list[i].backward;
// }
// }
// else if (edge_flags_are_superset_2)
// {
// if (edge_list[i - 1].weight <= edge_list[i].weight)
// {
// // edge i-1 is smaller for one direction. edge i is open in both. close edge
// i
// // in the other direction
// edge_list[i].forward = !edge_list[i - 1].forward;
// edge_list[i].backward = !edge_list[i - 1].backward;
// }
// else
// {
// // edge i is smaller and goes in both direction. Throw away edge i-1
// edge_list[i - 1].source = SPECIAL_NODEID;
// }
// }
// }
// }
// const auto new_end_iter =
// std::remove_if(edge_list.begin(), edge_list.end(), [](const EdgeT &edge)
// {
// return edge.source == SPECIAL_NODEID || edge.target == SPECIAL_NODEID;
// });
// edge_list.erase(new_end_iter, edge_list.end()); // remove excess candidates.
// edge_list.shrink_to_fit();
SimpleLogger().Write() << "Graph loaded ok and has " << edge_list.size() << " edges";
return n;
} }
template <typename EdgeT>
NodeID readBinaryOSRMGraphFromStream(std::istream &input_stream, /**
std::vector<EdgeT> &edge_list, * Reads the beginning of an .osrm file and produces:
std::vector<NodeID> &barrier_node_list, * - list of barrier nodes
std::vector<NodeID> &traffic_light_node_list, * - list of traffic lights
std::vector<QueryNode> *int_to_ext_node_id_map, * - nodes indexed by their internal (non-osm) id
std::vector<TurnRestriction> &restriction_list) */
NodeID loadNodesFromFile(std::istream &input_stream,
std::vector<NodeID> &barrier_node_list,
std::vector<NodeID> &traffic_light_node_list,
std::vector<QueryNode> &node_array)
{ {
const FingerPrint fingerprint_orig; const FingerPrint fingerprint_valid = FingerPrint::GetValid();
FingerPrint fingerprint_loaded; FingerPrint fingerprint_loaded;
input_stream.read(reinterpret_cast<char *>(&fingerprint_loaded), sizeof(FingerPrint)); input_stream.read(reinterpret_cast<char *>(&fingerprint_loaded), sizeof(FingerPrint));
if (!fingerprint_loaded.TestGraphUtil(fingerprint_orig)) if (!fingerprint_loaded.TestPrepare(fingerprint_valid))
{ {
SimpleLogger().Write(logWARNING) << ".osrm was prepared with different build.\n" SimpleLogger().Write(logWARNING) << ".osrm was prepared with different build.\n"
"Reprocess to get rid of this warning."; "Reprocess to get rid of this warning.";
} }
std::unordered_map<NodeID, NodeID> ext_to_int_id_map;
NodeID n; NodeID n;
input_stream.read(reinterpret_cast<char *>(&n), sizeof(NodeID)); input_stream.read(reinterpret_cast<char *>(&n), sizeof(NodeID));
SimpleLogger().Write() << "Importing n = " << n << " nodes "; SimpleLogger().Write() << "Importing n = " << n << " nodes ";
@ -297,9 +111,7 @@ NodeID readBinaryOSRMGraphFromStream(std::istream &input_stream,
for (NodeID i = 0; i < n; ++i) for (NodeID i = 0; i < n; ++i)
{ {
input_stream.read(reinterpret_cast<char *>(&current_node), sizeof(ExternalMemoryNode)); input_stream.read(reinterpret_cast<char *>(&current_node), sizeof(ExternalMemoryNode));
int_to_ext_node_id_map->emplace_back(current_node.lat, current_node.lon, node_array.emplace_back(current_node.lat, current_node.lon, current_node.node_id);
current_node.node_id);
ext_to_int_id_map.emplace(current_node.node_id, i);
if (current_node.barrier) if (current_node.barrier)
{ {
barrier_node_list.emplace_back(i); barrier_node_list.emplace_back(i);
@ -314,175 +126,48 @@ NodeID readBinaryOSRMGraphFromStream(std::istream &input_stream,
barrier_node_list.shrink_to_fit(); barrier_node_list.shrink_to_fit();
traffic_light_node_list.shrink_to_fit(); traffic_light_node_list.shrink_to_fit();
// renumber nodes in turn restrictions return n;
for (TurnRestriction &current_restriction : restriction_list) }
{
auto internal_id_iter = ext_to_int_id_map.find(current_restriction.from.node);
if (internal_id_iter == ext_to_int_id_map.end())
{
SimpleLogger().Write(logDEBUG) << "Unmapped from node " << current_restriction.from.node
<< " of restriction";
continue;
}
current_restriction.from.node = internal_id_iter->second;
internal_id_iter = ext_to_int_id_map.find(current_restriction.via.node);
if (internal_id_iter == ext_to_int_id_map.end())
{
SimpleLogger().Write(logDEBUG) << "Unmapped via node " << current_restriction.via.node
<< " of restriction";
continue;
}
current_restriction.via.node = internal_id_iter->second;
internal_id_iter = ext_to_int_id_map.find(current_restriction.to.node);
if (internal_id_iter == ext_to_int_id_map.end())
{
SimpleLogger().Write(logDEBUG) << "Unmapped to node " << current_restriction.to.node
<< " of restriction";
continue;
}
current_restriction.to.node = internal_id_iter->second;
}
EdgeWeight weight;
NodeID source, target;
unsigned nameID;
int length;
short dir; // direction (0 = open in both directions, 1 = forward only, 2 = backward only)
bool is_roundabout, ignore_in_grid, is_access_restricted, is_split;
TravelMode travel_mode;
/**
* Reads a .osrm file and produces the edges.
*/
NodeID loadEdgesFromFile(std::istream &input_stream,
std::vector<NodeBasedEdge> &edge_list)
{
EdgeID m; EdgeID m;
input_stream.read(reinterpret_cast<char *>(&m), sizeof(unsigned)); input_stream.read(reinterpret_cast<char *>(&m), sizeof(unsigned));
edge_list.reserve(m); edge_list.resize(m);
SimpleLogger().Write() << " and " << m << " edges "; SimpleLogger().Write() << " and " << m << " edges ";
for (EdgeID i = 0; i < m; ++i) input_stream.read((char *) edge_list.data(), m * sizeof(NodeBasedEdge));
{
input_stream.read(reinterpret_cast<char *>(&source), sizeof(unsigned));
input_stream.read(reinterpret_cast<char *>(&target), sizeof(unsigned));
input_stream.read(reinterpret_cast<char *>(&length), sizeof(int));
input_stream.read(reinterpret_cast<char *>(&dir), sizeof(short));
input_stream.read(reinterpret_cast<char *>(&weight), sizeof(int));
input_stream.read(reinterpret_cast<char *>(&nameID), sizeof(unsigned));
input_stream.read(reinterpret_cast<char *>(&is_roundabout), sizeof(bool));
input_stream.read(reinterpret_cast<char *>(&ignore_in_grid), sizeof(bool));
input_stream.read(reinterpret_cast<char *>(&is_access_restricted), sizeof(bool));
input_stream.read(reinterpret_cast<char *>(&travel_mode), sizeof(TravelMode));
input_stream.read(reinterpret_cast<char *>(&is_split), sizeof(bool));
BOOST_ASSERT_MSG(length > 0, "loaded null length edge"); BOOST_ASSERT(edge_list.size() > 0);
BOOST_ASSERT_MSG(weight > 0, "loaded null weight");
BOOST_ASSERT_MSG(0 <= dir && dir <= 2, "loaded bogus direction");
bool forward = true;
bool backward = true;
if (1 == dir)
{
backward = false;
}
if (2 == dir)
{
forward = false;
}
// translate the external NodeIDs to internal IDs
auto internal_id_iter = ext_to_int_id_map.find(source);
if (ext_to_int_id_map.find(source) == ext_to_int_id_map.end())
{
#ifndef NDEBUG #ifndef NDEBUG
SimpleLogger().Write(logWARNING) << " unresolved source NodeID: " << source; SimpleLogger().Write() << "Validating loaded edges...";
#endif std::sort(edge_list.begin(), edge_list.end(),
continue; [](const NodeBasedEdge& lhs, const NodeBasedEdge& rhs)
} {
source = internal_id_iter->second; return (lhs.source < rhs.source) || (lhs.source == rhs.source && lhs.target < rhs.target);
internal_id_iter = ext_to_int_id_map.find(target); });
if (ext_to_int_id_map.find(target) == ext_to_int_id_map.end()) for (auto i = 1u; i < edge_list.size(); ++i)
{
#ifndef NDEBUG
SimpleLogger().Write(logWARNING) << "unresolved target NodeID : " << target;
#endif
continue;
}
target = internal_id_iter->second;
BOOST_ASSERT_MSG(source != SPECIAL_NODEID && target != SPECIAL_NODEID,
"nonexisting source or target");
if (source > target)
{
std::swap(source, target);
std::swap(forward, backward);
}
edge_list.emplace_back(source, target, nameID, weight, forward, backward, is_roundabout,
ignore_in_grid, is_access_restricted, travel_mode, is_split);
}
ext_to_int_id_map.clear();
tbb::parallel_sort(edge_list.begin(), edge_list.end());
for (unsigned i = 1; i < edge_list.size(); ++i)
{ {
if ((edge_list[i - 1].target == edge_list[i].target) && const auto& edge = edge_list[i];
(edge_list[i - 1].source == edge_list[i].source)) const auto& prev_edge = edge_list[i-1];
{
const bool edge_flags_equivalent = (edge_list[i - 1].forward == edge_list[i].forward) &&
(edge_list[i - 1].backward == edge_list[i].backward);
const bool edge_flags_are_superset1 =
(edge_list[i - 1].forward && edge_list[i - 1].backward) &&
(edge_list[i].forward != edge_list[i].backward);
const bool edge_flags_are_superset_2 =
(edge_list[i].forward && edge_list[i].backward) &&
(edge_list[i - 1].forward != edge_list[i - 1].backward);
if (edge_flags_equivalent) BOOST_ASSERT_MSG(edge.weight > 0, "loaded null weight");
{ BOOST_ASSERT_MSG(edge.forward, "edge must be oriented in forward direction");
edge_list[i].weight = std::min(edge_list[i - 1].weight, edge_list[i].weight); BOOST_ASSERT_MSG(edge.travel_mode != TRAVEL_MODE_INACCESSIBLE, "loaded non-accessible");
edge_list[i - 1].source = SPECIAL_NODEID;
} BOOST_ASSERT_MSG(edge.source != edge.target, "loaded edges contain a loop");
else if (edge_flags_are_superset1) BOOST_ASSERT_MSG(edge.source != prev_edge.source || edge.target != prev_edge.target, "loaded edges contain a multi edge");
{
if (edge_list[i - 1].weight <= edge_list[i].weight)
{
// edge i-1 is smaller and goes in both directions. Throw away the other edge
edge_list[i].source = SPECIAL_NODEID;
}
else
{
// edge i-1 is open in both directions, but edge i is smaller in one direction.
// Close edge i-1 in this direction
edge_list[i - 1].forward = !edge_list[i].forward;
edge_list[i - 1].backward = !edge_list[i].backward;
}
}
else if (edge_flags_are_superset_2)
{
if (edge_list[i - 1].weight <= edge_list[i].weight)
{
// edge i-1 is smaller for one direction. edge i is open in both. close edge i
// in the other direction
edge_list[i].forward = !edge_list[i - 1].forward;
edge_list[i].backward = !edge_list[i - 1].backward;
}
else
{
// edge i is smaller and goes in both direction. Throw away edge i-1
edge_list[i - 1].source = SPECIAL_NODEID;
}
}
}
} }
const auto new_end_iter = #endif
std::remove_if(edge_list.begin(), edge_list.end(), [](const EdgeT &edge)
{
return edge.source == SPECIAL_NODEID || edge.target == SPECIAL_NODEID;
});
edge_list.erase(new_end_iter, edge_list.end()); // remove excess candidates.
edge_list.shrink_to_fit();
SimpleLogger().Write() << "Graph loaded ok and has " << edge_list.size() << " edges"; SimpleLogger().Write() << "Graph loaded ok and has " << edge_list.size() << " edges";
return n;
return m;
} }
template <typename NodeT, typename EdgeT> template <typename NodeT, typename EdgeT>
@ -502,9 +187,10 @@ unsigned readHSGRFromStream(const boost::filesystem::path &hsgr_file,
boost::filesystem::ifstream hsgr_input_stream(hsgr_file, std::ios::binary); boost::filesystem::ifstream hsgr_input_stream(hsgr_file, std::ios::binary);
FingerPrint fingerprint_loaded, fingerprint_orig; const FingerPrint fingerprint_valid = FingerPrint::GetValid();
FingerPrint fingerprint_loaded;
hsgr_input_stream.read(reinterpret_cast<char *>(&fingerprint_loaded), sizeof(FingerPrint)); hsgr_input_stream.read(reinterpret_cast<char *>(&fingerprint_loaded), sizeof(FingerPrint));
if (!fingerprint_loaded.TestGraphUtil(fingerprint_orig)) if (!fingerprint_loaded.TestGraphUtil(fingerprint_valid))
{ {
SimpleLogger().Write(logWARNING) << ".hsgr was prepared with different build.\n" SimpleLogger().Write(logWARNING) << ".hsgr was prepared with different build.\n"
"Reprocess to get rid of this warning."; "Reprocess to get rid of this warning.";


@ -90,7 +90,7 @@ template <typename Iterator> struct iso_8601_grammar : qi::grammar<Iterator>
unsigned get_duration() const unsigned get_duration() const
{ {
unsigned temp = 10 * (3600 * hours + 60 * minutes + seconds); unsigned temp = (3600 * hours + 60 * minutes + seconds);
if (temp == 0) if (temp == 0)
{ {
temp = std::numeric_limits<unsigned>::max(); temp = std::numeric_limits<unsigned>::max();
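Dropping the factor of 10 changes the parser's unit from tenths of a second to whole seconds, which is what the updated parseDuration tests earlier in this change expect (PT1H15M1S now yields 4501). The arithmetic, as a small sketch with illustrative names:

    // Duration in seconds: 3600 s per hour, 60 s per minute, no decisecond scaling.
    unsigned duration_in_seconds(unsigned hours, unsigned minutes, unsigned seconds)
    {
        return 3600 * hours + 60 * minutes + seconds;
    }
    // duration_in_seconds(1, 15, 1) == 4501, matching the "PT1H15M1S" test case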

View File

@ -55,11 +55,10 @@ inline bool lua_function_exists(lua_State *lua_state, const char *name)
// See http://lua-users.org/wiki/PackagePath for details on the package.path syntax. // See http://lua-users.org/wiki/PackagePath for details on the package.path syntax.
inline void luaAddScriptFolderToLoadPath(lua_State *lua_state, const char *file_name) inline void luaAddScriptFolderToLoadPath(lua_State *lua_state, const char *file_name)
{ {
const boost::filesystem::path profile_path(file_name); boost::filesystem::path profile_path = boost::filesystem::canonical(file_name);
std::string folder = profile_path.parent_path().string(); std::string folder = profile_path.parent_path().string();
// TODO: This code is most probably not Windows safe since it uses UNIX'ish path delimiters // TODO: This code is most probably not Windows safe since it uses UNIX'ish path delimiters
const std::string lua_code = const std::string lua_code = "package.path = \"" + folder + "/?.lua;\" .. package.path";
"package.path = \"" + folder + "/?.lua;profiles/?.lua;\" .. package.path";
luaL_dostring(lua_state, lua_code.c_str()); luaL_dostring(lua_state, lua_code.c_str());
} }