Merge branch 'develop'

This commit is contained in:
Patrick Niklaus 2015-12-24 11:22:16 +01:00
commit a2e114e852
191 changed files with 7734 additions and 5541 deletions

4
.clang-tidy Normal file
View File

@ -0,0 +1,4 @@
---
Checks: '-clang-analyzer-*,google-*,llvm-*,misc-*,readability-*,-google-build-explicit-make-pair,-google-explicit-constructor,-google-readability-braces-around-statements,-google-readability-casting,-google-readability-namespace-comments,-google-readability-function,-google-readability-todo,-google-runtime-int,-llvm-namespace-comment,-llvm-header-guard,-llvm-twine-local,-misc-argument-comment,-readability-braces-around-statements,-readability-identifier-naming'
...

2
.gitignore vendored
View File

@ -40,8 +40,6 @@ Thumbs.db
# build related files # # build related files #
####################### #######################
/build/ /build/
/util/fingerprint_impl.hpp
/util/git_sha.cpp
/cmake/postinst /cmake/postinst
# Eclipse related files # # Eclipse related files #

View File

@ -1,63 +1,146 @@
language: cpp language: cpp
compiler: sudo: required
- gcc dist: trusty
# - clang
# Make sure CMake is installed notifications:
install: email: false
- sudo apt-add-repository -y ppa:ubuntu-toolchain-r/test
- sudo add-apt-repository -y ppa:boost-latest/ppa
- sudo apt-get update >/dev/null
- sudo apt-get -q install libbz2-dev libstxxl-dev libstxxl1 libxml2-dev libzip-dev lua5.1 liblua5.1-0-dev rubygems libtbb-dev
- sudo apt-get -q install g++-4.8
- sudo apt-get install libboost1.54-all-dev
- sudo apt-get install libgdal-dev
# luabind
- curl https://gist.githubusercontent.com/DennisOSRM/f2eb7b948e6fe1ae319e/raw/install-luabind.sh | sudo bash
# osmosis
- curl -s https://gist.githubusercontent.com/DennisOSRM/803a64a9178ec375069f/raw/ | sudo bash
# cmake
- curl -s https://gist.githubusercontent.com/DennisOSRM/5fad9bee5c7f09fd7fc9/raw/ | sudo bash
before_script:
- rvm use 1.9.3
- gem install bundler
- bundle install
- mkdir build
- cd build
- cmake .. $CMAKEOPTIONS -DBUILD_TOOLS=1
script:
- make
- make tests
- make benchmarks
- ./algorithm-tests
- ./datastructure-tests
- cd ..
- cucumber -p verify
after_script:
# - cd ..
# - cucumber -p verify
branches: branches:
only: only:
- master - master
- develop - develop
cache:
- bundler
- apt
env:
- CMAKEOPTIONS="-DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=g++-4.8" OSRM_PORT=5000 OSRM_TIMEOUT=60
- CMAKEOPTIONS="-DCMAKE_BUILD_TYPE=Debug -DCMAKE_CXX_COMPILER=g++-4.8" OSRM_PORT=5010 OSRM_TIMEOUT=60
- CMAKEOPTIONS="-DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=ON -DCMAKE_CXX_COMPILER=g++-4.8" OSRM_PORT=5020 OSRM_TIMEOUT=60
notifications:
slack: mapbox:4A6euphDwfxAQnhLurXbu6A1
irc:
channels:
- irc.oftc.net#osrm
on_success: change
on_failure: always
use_notice: true
skip_join: false
recipients: matrix:
- patrick@mapbox.com include:
email:
on_success: change # 1/ Linux Clang Builds
on_failure: always
- os: linux
compiler: clang
addons: &clang38
apt:
sources: ['llvm-toolchain-precise', 'ubuntu-toolchain-r-test']
packages: ['clang-3.8', 'libbz2-dev', 'libstxxl-dev', 'libstxxl1', 'libxml2-dev', 'libzip-dev', 'lua5.1', 'liblua5.1-0-dev', 'rubygems-integration', 'libtbb-dev', 'libgdal-dev', 'libluabind-dev', 'libboost-all-dev']
env: COMPILER='clang++-3.8' BUILD_TYPE='Release'
- os: linux
compiler: clang
addons: &clang38
apt:
sources: ['llvm-toolchain-precise', 'ubuntu-toolchain-r-test']
packages: ['clang-3.8', 'libbz2-dev', 'libstxxl-dev', 'libstxxl1', 'libxml2-dev', 'libzip-dev', 'lua5.1', 'liblua5.1-0-dev', 'rubygems-integration', 'libtbb-dev', 'libgdal-dev', 'libluabind-dev', 'libboost-all-dev']
env: COMPILER='clang++-3.8' BUILD_TYPE='Release' BUILD_SHARED_LIBS=ON
- os: linux
compiler: clang
addons: *clang38
env: COMPILER='clang++-3.8' BUILD_TYPE='Debug'
# 2/ Linux GCC Builds
- os: linux
compiler: gcc
addons: &gcc48
apt:
sources: ['ubuntu-toolchain-r-test']
packages: ['g++-4.8', 'libbz2-dev', 'libstxxl-dev', 'libstxxl1', 'libxml2-dev', 'libzip-dev', 'lua5.1', 'liblua5.1-0-dev', 'rubygems-integration', 'libtbb-dev', 'libgdal-dev', 'libluabind-dev', 'libboost-all-dev']
env: COMPILER='g++-4.8' BUILD_TYPE='Release'
- os: linux
compiler: gcc
addons: *gcc48
env: COMPILER='g++-4.8' BUILD_TYPE='Debug'
- os: linux
compiler: gcc
addons: &gcc5
apt:
sources: ['ubuntu-toolchain-r-test']
packages: ['g++-5', 'libbz2-dev', 'libstxxl-dev', 'libstxxl1', 'libxml2-dev', 'libzip-dev', 'lua5.1', 'liblua5.1-0-dev', 'rubygems-integration', 'libtbb-dev', 'libgdal-dev', 'libluabind-dev', 'libboost-all-dev']
env: COMPILER='g++-5' BUILD_TYPE='Release'
- os: linux
compiler: gcc
addons: &gcc5
apt:
sources: ['ubuntu-toolchain-r-test']
packages: ['g++-5', 'libbz2-dev', 'libstxxl-dev', 'libstxxl1', 'libxml2-dev', 'libzip-dev', 'lua5.1', 'liblua5.1-0-dev', 'rubygems-integration', 'libtbb-dev', 'libgdal-dev', 'libluabind-dev', 'libboost-all-dev']
env: COMPILER='g++-5' BUILD_TYPE='Release' BUILD_SHARED_LIBS=ON
- os: linux
compiler: gcc
addons: *gcc5
env: COMPILER='g++-5' BUILD_TYPE='Debug'
# Disabled until tests all pass on OSX:
#
# 3/ OSX Clang Builds
#- os: osx
# osx_image: xcode6.4
# compiler: clang
# env: COMPILER='clang++' BUILD_TYPE='Debug'
#- os: osx
# osx_image: xcode6.4
# compiler: clang
# env: COMPILER='clang++' BUILD_TYPE='Release'
#- os: osx
# osx_image: xcode6.4
# compiler: clang
# env: COMPILER='clang++' BUILD_TYPE='Release' BUILD_SHARED_LIBS=ON
#- os: osx
# osx_image: xcode7
# compiler: clang
# env: COMPILER='clang++' BUILD_TYPE='Debug'
#- os: osx
# osx_image: xcode7
# compiler: clang
# env: COMPILER='clang++' BUILD_TYPE='Release'
#- os: osx
# osx_image: xcode7
# compiler: clang
# env: COMPILER='clang++' BUILD_TYPE='Release' BUILD_SHARED_LIBS=ON
install:
- DEPS_DIR="${TRAVIS_BUILD_DIR}/deps"
- mkdir -p ${DEPS_DIR} && cd ${DEPS_DIR}
- |
if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then
CMAKE_URL="http://www.cmake.org/files/v3.3/cmake-3.3.2-Linux-x86_64.tar.gz"
mkdir cmake && travis_retry wget --quiet -O - ${CMAKE_URL} | tar --strip-components=1 -xz -C cmake
export PATH=${DEPS_DIR}/cmake/bin:${PATH}
OSMOSIS_URL="http://bretth.dev.openstreetmap.org/osmosis-build/osmosis-latest.tgz"
mkdir osmosis && travis_retry wget --quiet -O - ${OSMOSIS_URL} | tar -xz -C osmosis
export PATH=${DEPS_DIR}/osmosis/bin:${PATH}
elif [[ "${TRAVIS_OS_NAME}" == "osx" ]]; then
brew install cmake boost libzip libstxxl libxml2 lua51 luabind tbb GDAL osmosis
fi
before_script:
- cd ${TRAVIS_BUILD_DIR}
- rvm use 1.9.3
- gem install bundler
- bundle install
- mkdir build && cd build
- export CXX=${COMPILER}
- export OSRM_PORT=5000 OSRM_TIMEOUT=60
- cmake .. -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DBUILD_SHARED_LIBS=${BUILD_SHARED_LIBS:-OFF} -DBUILD_TOOLS=1
script:
- make --jobs=2
- make tests --jobs=2
- make benchmarks
- ./algorithm-tests
- ./datastructure-tests
- ./util-tests
- cd ..
- cucumber -p verify

View File

@ -7,13 +7,15 @@ This process created the file `CMakeCache.txt' and the directory `CMakeFiles'. P
endif() endif()
project(OSRM C CXX) project(OSRM C CXX)
set(OSRM_VERSION_MAJOR 4)
set(OSRM_VERSION_MINOR 9)
set(OSRM_VERSION_PATCH 0)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON) set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
include(CheckCXXCompilerFlag) include(CheckCXXCompilerFlag)
include(FindPackageHandleStandardArgs) include(FindPackageHandleStandardArgs)
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
include(GetGitRevisionDescription)
git_describe(GIT_DESCRIPTION)
set(bitness 32) set(bitness 32)
if(CMAKE_SIZEOF_VOID_P EQUAL 8) if(CMAKE_SIZEOF_VOID_P EQUAL 8)
@ -28,29 +30,32 @@ if(WIN32 AND MSVC_VERSION LESS 1800)
endif() endif()
option(ENABLE_JSON_LOGGING "Adds additional JSON debug logging to the response" OFF) option(ENABLE_JSON_LOGGING "Adds additional JSON debug logging to the response" OFF)
option(WITH_TOOLS "Build OSRM tools" OFF) option(DEBUG_GEOMETRY "Enables an option to dump GeoJSON of the final routing graph" OFF)
option(BUILD_TOOLS "Build OSRM tools" OFF) option(BUILD_TOOLS "Build OSRM tools" OFF)
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
include_directories(${CMAKE_CURRENT_BINARY_DIR})
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include/) include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include/)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/third_party/) include_directories(SYSTEM ${CMAKE_CURRENT_SOURCE_DIR}/third_party/)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/third_party/libosmium/include/) include_directories(SYSTEM ${CMAKE_CURRENT_SOURCE_DIR}/third_party/libosmium/include/)
add_custom_target(FingerPrintConfigure ALL add_custom_target(FingerPrintConfigure ALL ${CMAKE_COMMAND}
${CMAKE_COMMAND} -DSOURCE_DIR=${CMAKE_SOURCE_DIR} "-DOUTPUT_DIR=${CMAKE_CURRENT_BINARY_DIR}"
-P ${CMAKE_CURRENT_SOURCE_DIR}/cmake/FingerPrint-Config.cmake "-DSOURCE_DIR=${CMAKE_CURRENT_SOURCE_DIR}"
-P "${CMAKE_CURRENT_SOURCE_DIR}/cmake/FingerPrint-Config.cmake"
COMMENT "Configuring revision fingerprint" COMMENT "Configuring revision fingerprint"
VERBATIM) VERBATIM)
add_custom_target(tests DEPENDS datastructure-tests algorithm-tests) add_custom_target(tests DEPENDS datastructure-tests algorithm-tests util-tests)
add_custom_target(benchmarks DEPENDS rtree-bench) add_custom_target(benchmarks DEPENDS rtree-bench)
set(BOOST_COMPONENTS date_time filesystem iostreams program_options regex system thread unit_test_framework) set(BOOST_COMPONENTS date_time filesystem iostreams program_options regex system thread unit_test_framework)
configure_file( configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/util/git_sha.cpp.in ${CMAKE_CURRENT_SOURCE_DIR}/util/version.hpp.in
${CMAKE_CURRENT_SOURCE_DIR}/util/git_sha.cpp ${CMAKE_CURRENT_BINARY_DIR}/util/version.hpp
) )
file(GLOB ExtractorGlob extractor/*.cpp) file(GLOB ExtractorGlob extractor/*.cpp data_structures/hilbert_value.cpp)
file(GLOB ImporterGlob data_structures/import_edge.cpp data_structures/external_memory_node.cpp data_structures/raster_source.cpp) file(GLOB ImporterGlob data_structures/import_edge.cpp data_structures/external_memory_node.cpp data_structures/raster_source.cpp)
add_library(IMPORT OBJECT ${ImporterGlob}) add_library(IMPORT OBJECT ${ImporterGlob})
add_library(LOGGER OBJECT util/simple_logger.cpp) add_library(LOGGER OBJECT util/simple_logger.cpp)
@ -61,7 +66,7 @@ add_library(MERCATOR OBJECT util/mercator.cpp)
add_library(ANGLE OBJECT util/compute_angle.cpp) add_library(ANGLE OBJECT util/compute_angle.cpp)
set(ExtractorSources extract.cpp ${ExtractorGlob}) set(ExtractorSources extract.cpp ${ExtractorGlob})
add_executable(osrm-extract ${ExtractorSources} $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:GITDESCRIPTION> $<TARGET_OBJECTS:IMPORT> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR>) add_executable(osrm-extract ${ExtractorSources} $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:IMPORT> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR> $<TARGET_OBJECTS:COMPRESSEDEDGE> $<TARGET_OBJECTS:GRAPHCOMPRESSOR> $<TARGET_OBJECTS:RESTRICTION> $<TARGET_OBJECTS:ANGLE>)
add_library(RESTRICTION OBJECT data_structures/restriction_map.cpp) add_library(RESTRICTION OBJECT data_structures/restriction_map.cpp)
add_library(COMPRESSEDEDGE OBJECT data_structures/compressed_edge_container.cpp) add_library(COMPRESSEDEDGE OBJECT data_structures/compressed_edge_container.cpp)
@ -69,7 +74,7 @@ add_library(GRAPHCOMPRESSOR OBJECT algorithms/graph_compressor.cpp)
file(GLOB PrepareGlob contractor/*.cpp data_structures/hilbert_value.cpp {RestrictionMapGlob}) file(GLOB PrepareGlob contractor/*.cpp data_structures/hilbert_value.cpp {RestrictionMapGlob})
set(PrepareSources prepare.cpp ${PrepareGlob}) set(PrepareSources prepare.cpp ${PrepareGlob})
add_executable(osrm-prepare ${PrepareSources} $<TARGET_OBJECTS:ANGLE> $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:GITDESCRIPTION> $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:IMPORT> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:RESTRICTION> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR> $<TARGET_OBJECTS:COMPRESSEDEDGE> $<TARGET_OBJECTS:GRAPHCOMPRESSOR>) add_executable(osrm-prepare ${PrepareSources} $<TARGET_OBJECTS:ANGLE> $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:IMPORT> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:RESTRICTION> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR> $<TARGET_OBJECTS:COMPRESSEDEDGE> $<TARGET_OBJECTS:GRAPHCOMPRESSOR>)
file(GLOB ServerGlob server/*.cpp) file(GLOB ServerGlob server/*.cpp)
file(GLOB DescriptorGlob descriptors/*.cpp) file(GLOB DescriptorGlob descriptors/*.cpp)
@ -80,6 +85,7 @@ file(GLOB HttpGlob server/http/*.cpp)
file(GLOB LibOSRMGlob library/*.cpp) file(GLOB LibOSRMGlob library/*.cpp)
file(GLOB DataStructureTestsGlob unit_tests/data_structures/*.cpp data_structures/hilbert_value.cpp) file(GLOB DataStructureTestsGlob unit_tests/data_structures/*.cpp data_structures/hilbert_value.cpp)
file(GLOB AlgorithmTestsGlob unit_tests/algorithms/*.cpp algorithms/graph_compressor.cpp) file(GLOB AlgorithmTestsGlob unit_tests/algorithms/*.cpp algorithms/graph_compressor.cpp)
file(GLOB UtilTestsGlob unit_tests/util/*.cpp)
set( set(
OSRMSources OSRMSources
@ -91,8 +97,7 @@ set(
) )
add_library(COORDINATE OBJECT ${CoordinateGlob}) add_library(COORDINATE OBJECT ${CoordinateGlob})
add_library(GITDESCRIPTION OBJECT util/git_sha.cpp) add_library(OSRM ${OSRMSources} $<TARGET_OBJECTS:ANGLE> $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:RESTRICTION> $<TARGET_OBJECTS:PHANTOMNODE> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR> $<TARGET_OBJECTS:IMPORT>)
add_library(OSRM ${OSRMSources} $<TARGET_OBJECTS:ANGLE> $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:GITDESCRIPTION> $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:RESTRICTION> $<TARGET_OBJECTS:PHANTOMNODE> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR> $<TARGET_OBJECTS:IMPORT>)
add_library(FINGERPRINT OBJECT util/fingerprint.cpp) add_library(FINGERPRINT OBJECT util/fingerprint.cpp)
add_dependencies(FINGERPRINT FingerPrintConfigure) add_dependencies(FINGERPRINT FingerPrintConfigure)
@ -100,11 +105,12 @@ add_dependencies(OSRM FingerPrintConfigure)
set_target_properties(FINGERPRINT PROPERTIES LINKER_LANGUAGE CXX) set_target_properties(FINGERPRINT PROPERTIES LINKER_LANGUAGE CXX)
add_executable(osrm-routed routed.cpp ${ServerGlob} $<TARGET_OBJECTS:EXCEPTION>) add_executable(osrm-routed routed.cpp ${ServerGlob} $<TARGET_OBJECTS:EXCEPTION>)
add_executable(osrm-datastore datastore.cpp $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:GITDESCRIPTION> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR>) add_executable(osrm-datastore datastore.cpp $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR>)
# Unit tests # Unit tests
add_executable(datastructure-tests EXCLUDE_FROM_ALL unit_tests/datastructure_tests.cpp ${DataStructureTestsGlob} $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:PHANTOMNODE> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR> $<TARGET_OBJECTS:COMPRESSEDEDGE> $<TARGET_OBJECTS:GRAPHCOMPRESSOR> $<TARGET_OBJECTS:RESTRICTION> $<TARGET_OBJECTS:RASTERSOURCE>) add_executable(datastructure-tests EXCLUDE_FROM_ALL unit_tests/datastructure_tests.cpp ${DataStructureTestsGlob} $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:PHANTOMNODE> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR> $<TARGET_OBJECTS:COMPRESSEDEDGE> $<TARGET_OBJECTS:GRAPHCOMPRESSOR> $<TARGET_OBJECTS:RESTRICTION> $<TARGET_OBJECTS:RASTERSOURCE>)
add_executable(algorithm-tests EXCLUDE_FROM_ALL unit_tests/algorithm_tests.cpp ${AlgorithmTestsGlob} $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:PHANTOMNODE> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:RESTRICTION> $<TARGET_OBJECTS:COMPRESSEDEDGE>) add_executable(algorithm-tests EXCLUDE_FROM_ALL unit_tests/algorithm_tests.cpp ${AlgorithmTestsGlob} $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:PHANTOMNODE> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:RESTRICTION> $<TARGET_OBJECTS:COMPRESSEDEDGE>)
add_executable(util-tests EXCLUDE_FROM_ALL unit_tests/util_tests.cpp ${UtilTestsGlob})
# Benchmarks # Benchmarks
add_executable(rtree-bench EXCLUDE_FROM_ALL benchmarks/static_rtree.cpp $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:PHANTOMNODE> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR>) add_executable(rtree-bench EXCLUDE_FROM_ALL benchmarks/static_rtree.cpp $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:PHANTOMNODE> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR>)
@ -116,18 +122,29 @@ endif()
if(CMAKE_BUILD_TYPE MATCHES Debug) if(CMAKE_BUILD_TYPE MATCHES Debug)
message(STATUS "Configuring OSRM in debug mode") message(STATUS "Configuring OSRM in debug mode")
if(NOT ${CMAKE_CXX_COMPILER_ID} STREQUAL "MSVC") if(NOT ${CMAKE_CXX_COMPILER_ID} STREQUAL "MSVC")
message(STATUS "adding profiling flags")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage -fno-inline") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-inline -fno-omit-frame-pointer")
set(CMAKE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-arcs -ftest-coverage -fno-inline")
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Og -ggdb")
endif()
endif() endif()
endif() endif()
if(CMAKE_BUILD_TYPE MATCHES Release) if(CMAKE_BUILD_TYPE MATCHES Release)
message(STATUS "Configuring OSRM in release mode") message(STATUS "Configuring OSRM in release mode")
# Check if LTO is available # Check if LTO is available
set(LTO_FLAGS "")
check_cxx_compiler_flag("-flto" LTO_AVAILABLE) check_cxx_compiler_flag("-flto" LTO_AVAILABLE)
if(LTO_AVAILABLE) if(LTO_AVAILABLE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -flto") set(OLD_CXX_FLAGS ${CMAKE_CXX_FLAGS})
# GCC in addition allows parallelizing LTO
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
include(ProcessorCount)
ProcessorCount(NPROC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -flto=${NPROC}")
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -flto")
endif()
set(CHECK_LTO_SRC "int main(){return 0;}") set(CHECK_LTO_SRC "int main(){return 0;}")
check_cxx_source_compiles("${CHECK_LTO_SRC}" LTO_WORKS) check_cxx_source_compiles("${CHECK_LTO_SRC}" LTO_WORKS)
if(LTO_WORKS) if(LTO_WORKS)
@ -144,6 +161,11 @@ if(CMAKE_BUILD_TYPE MATCHES Release)
set(CMAKE_AR "/usr/bin/gcc-ar") set(CMAKE_AR "/usr/bin/gcc-ar")
set(CMAKE_RANLIB "/usr/bin/gcc-ranlib") set(CMAKE_RANLIB "/usr/bin/gcc-ranlib")
endif() endif()
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" AND "${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS "4.9.0")
message(STATUS "Disabling LTO on GCC < 4.9.0 since it is broken, see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=57038")
set(CMAKE_CXX_FLAGS "${OLD_CXX_FLAGS}")
endif()
endif() endif()
endif() endif()
@ -153,9 +175,7 @@ endif()
# Configuring compilers # Configuring compilers
if(${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang") if(${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang")
# using Clang set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -pedantic -Wuninitialized -Wunreachable-code -Wstrict-overflow=2 -D_FORTIFY_SOURCE=2 -fPIC")
# -Weverything -Wno-c++98-compat -Wno-shadow -Wno-exit-time-destructors
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wunreachable-code -pedantic -fPIC")
elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU")
set(COLOR_FLAG "-fdiagnostics-color=auto") set(COLOR_FLAG "-fdiagnostics-color=auto")
check_cxx_compiler_flag("-fdiagnostics-color=auto" HAS_COLOR_FLAG) check_cxx_compiler_flag("-fdiagnostics-color=auto" HAS_COLOR_FLAG)
@ -163,7 +183,7 @@ elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU")
set(COLOR_FLAG "") set(COLOR_FLAG "")
endif() endif()
# using GCC # using GCC
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -pedantic -fPIC ${COLOR_FLAG}") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -pedantic -Wuninitialized -Wunreachable-code -Wstrict-overflow=1 -D_FORTIFY_SOURCE=2 ${COLOR_FLAG} -fPIC")
if(WIN32) # using mingw if(WIN32) # using mingw
add_definitions(-D_USE_MATH_DEFINES) # define M_PI, M_1_PI etc. add_definitions(-D_USE_MATH_DEFINES) # define M_PI, M_1_PI etc.
add_definitions(-DWIN32) add_definitions(-DWIN32)
@ -184,6 +204,26 @@ elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL "MSVC")
target_link_libraries(osrm-extract wsock32 ws2_32) target_link_libraries(osrm-extract wsock32 ws2_32)
endif() endif()
# Configuring linker
execute_process(COMMAND ${CMAKE_CXX_COMPILER} "-Wl,--version" ERROR_QUIET OUTPUT_VARIABLE LINKER_VERSION)
# For ld.gold and ld.bfs (the GNU linkers) we optimize hard
if("${LINKER_VERSION}" MATCHES "GNU gold" OR "${LINKER_VERSION}" MATCHES "GNU ld")
message(STATUS "Setting linker optimizations")
if(NOT ${CMAKE_CXX_COMPILER_ID} STREQUAL "MSVC")
# Tell compiler to put every function in separate section, linker can then match sections and functions
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -ffunction-sections -fdata-sections")
# Tell linker to do dead code and data eminination during link time discarding sections
set(LINKER_FLAGS "${LINKER_FLAGS} -Wl,--gc-sections")
endif()
# Default linker optimization flags
set(LINKER_FLAGS "${LINKER_FLAGS} -Wl,-O1 -Wl,--hash-style=gnu -Wl,--sort-common")
else()
message(STATUS "Using unknown linker, not setting linker optimizations")
endif ()
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${LINKER_FLAGS}")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${LINKER_FLAGS}")
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${LINKER_FLAGS}")
# Activate C++11 # Activate C++11
if(NOT ${CMAKE_CXX_COMPILER_ID} STREQUAL "MSVC") if(NOT ${CMAKE_CXX_COMPILER_ID} STREQUAL "MSVC")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 ") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 ")
@ -211,12 +251,11 @@ if(UNIX AND NOT APPLE)
endif() endif()
#Check Boost #Check Boost
set(BOOST_MIN_VERSION "1.49.0") find_package(Boost 1.49.0 COMPONENTS ${BOOST_COMPONENTS} REQUIRED)
find_package(Boost ${BOOST_MIN_VERSION} COMPONENTS ${BOOST_COMPONENTS} REQUIRED)
if(NOT Boost_FOUND) if(NOT Boost_FOUND)
message(FATAL_ERROR "Fatal error: Boost (version >= 1.49.0) required.\n") message(FATAL_ERROR "Fatal error: Boost (version >= 1.49.0) required.\n")
endif() endif()
include_directories(${Boost_INCLUDE_DIRS}) include_directories(SYSTEM ${Boost_INCLUDE_DIRS})
target_link_libraries(OSRM ${Boost_LIBRARIES}) target_link_libraries(OSRM ${Boost_LIBRARIES})
target_link_libraries(osrm-extract ${Boost_LIBRARIES}) target_link_libraries(osrm-extract ${Boost_LIBRARIES})
@ -225,6 +264,7 @@ target_link_libraries(osrm-routed ${Boost_LIBRARIES} ${OPTIONAL_SOCKET_LIBS} OSR
target_link_libraries(osrm-datastore ${Boost_LIBRARIES}) target_link_libraries(osrm-datastore ${Boost_LIBRARIES})
target_link_libraries(datastructure-tests ${Boost_LIBRARIES}) target_link_libraries(datastructure-tests ${Boost_LIBRARIES})
target_link_libraries(algorithm-tests ${Boost_LIBRARIES} ${OPTIONAL_SOCKET_LIBS} OSRM) target_link_libraries(algorithm-tests ${Boost_LIBRARIES} ${OPTIONAL_SOCKET_LIBS} OSRM)
target_link_libraries(util-tests ${Boost_LIBRARIES})
target_link_libraries(rtree-bench ${Boost_LIBRARIES}) target_link_libraries(rtree-bench ${Boost_LIBRARIES})
find_package(Threads REQUIRED) find_package(Threads REQUIRED)
@ -247,12 +287,12 @@ target_link_libraries(osrm-routed ${TBB_LIBRARIES})
target_link_libraries(datastructure-tests ${TBB_LIBRARIES}) target_link_libraries(datastructure-tests ${TBB_LIBRARIES})
target_link_libraries(algorithm-tests ${TBB_LIBRARIES}) target_link_libraries(algorithm-tests ${TBB_LIBRARIES})
target_link_libraries(rtree-bench ${TBB_LIBRARIES}) target_link_libraries(rtree-bench ${TBB_LIBRARIES})
include_directories(${TBB_INCLUDE_DIR}) include_directories(SYSTEM ${TBB_INCLUDE_DIR})
find_package( Luabind REQUIRED ) find_package( Luabind REQUIRED )
include(check_luabind) include(check_luabind)
include_directories(${LUABIND_INCLUDE_DIR}) include_directories(SYSTEM ${LUABIND_INCLUDE_DIR})
target_link_libraries(osrm-extract ${LUABIND_LIBRARY}) target_link_libraries(osrm-extract ${LUABIND_LIBRARY})
target_link_libraries(osrm-prepare ${LUABIND_LIBRARY}) target_link_libraries(osrm-prepare ${LUABIND_LIBRARY})
@ -263,17 +303,18 @@ else()
target_link_libraries(osrm-extract ${LUA_LIBRARY}) target_link_libraries(osrm-extract ${LUA_LIBRARY})
target_link_libraries(osrm-prepare ${LUA_LIBRARY}) target_link_libraries(osrm-prepare ${LUA_LIBRARY})
endif() endif()
include_directories(${LUA_INCLUDE_DIR}) include_directories(SYSTEM ${LUA_INCLUDE_DIR})
find_package(EXPAT REQUIRED) find_package(EXPAT REQUIRED)
include_directories(${EXPAT_INCLUDE_DIRS}) include_directories(SYSTEM ${EXPAT_INCLUDE_DIRS})
target_link_libraries(osrm-extract ${EXPAT_LIBRARIES}) target_link_libraries(osrm-extract ${EXPAT_LIBRARIES})
find_package(STXXL REQUIRED) find_package(STXXL REQUIRED)
include_directories(${STXXL_INCLUDE_DIR}) include_directories(SYSTEM ${STXXL_INCLUDE_DIR})
target_link_libraries(OSRM ${STXXL_LIBRARY}) target_link_libraries(OSRM ${STXXL_LIBRARY})
target_link_libraries(osrm-extract ${STXXL_LIBRARY}) target_link_libraries(osrm-extract ${STXXL_LIBRARY})
target_link_libraries(osrm-prepare ${STXXL_LIBRARY}) target_link_libraries(osrm-prepare ${STXXL_LIBRARY})
target_link_libraries(datastructure-tests ${STXXL_LIBRARY})
set(OpenMP_FIND_QUIETLY ON) set(OpenMP_FIND_QUIETLY ON)
find_package(OpenMP) find_package(OpenMP)
@ -283,11 +324,11 @@ if(OPENMP_FOUND)
endif() endif()
find_package(BZip2 REQUIRED) find_package(BZip2 REQUIRED)
include_directories(${BZIP_INCLUDE_DIRS}) include_directories(SYSTEM ${BZIP_INCLUDE_DIRS})
target_link_libraries(osrm-extract ${BZIP2_LIBRARIES}) target_link_libraries(osrm-extract ${BZIP2_LIBRARIES})
find_package(ZLIB REQUIRED) find_package(ZLIB REQUIRED)
include_directories(${ZLIB_INCLUDE_DIRS}) include_directories(SYSTEM ${ZLIB_INCLUDE_DIRS})
target_link_libraries(osrm-extract ${ZLIB_LIBRARY}) target_link_libraries(osrm-extract ${ZLIB_LIBRARY})
target_link_libraries(osrm-routed ${ZLIB_LIBRARY}) target_link_libraries(osrm-routed ${ZLIB_LIBRARY})
@ -296,16 +337,19 @@ if (ENABLE_JSON_LOGGING)
add_definitions(-DENABLE_JSON_LOGGING) add_definitions(-DENABLE_JSON_LOGGING)
endif() endif()
if(WITH_TOOLS OR BUILD_TOOLS) if (DEBUG_GEOMETRY)
message(STATUS "Enabling final edge weight GeoJSON output option")
add_definitions(-DDEBUG_GEOMETRY)
endif()
if(BUILD_TOOLS)
message(STATUS "Activating OSRM internal tools") message(STATUS "Activating OSRM internal tools")
find_package(GDAL) find_package(GDAL)
if(GDAL_FOUND) if(GDAL_FOUND)
add_executable(osrm-components tools/components.cpp $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:IMPORT> $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:RESTRICTION> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR>) add_executable(osrm-components tools/components.cpp $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:IMPORT> $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:RESTRICTION> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR>)
target_link_libraries(osrm-components ${TBB_LIBRARIES}) target_link_libraries(osrm-components ${TBB_LIBRARIES})
include_directories(${GDAL_INCLUDE_DIR}) include_directories(SYSTEM ${GDAL_INCLUDE_DIR})
target_link_libraries( target_link_libraries(osrm-components ${GDAL_LIBRARIES} ${Boost_LIBRARIES})
osrm-components
${GDAL_LIBRARIES} ${Boost_LIBRARIES})
install(TARGETS osrm-components DESTINATION bin) install(TARGETS osrm-components DESTINATION bin)
else() else()
message(FATAL_ERROR "libgdal and/or development headers not found") message(FATAL_ERROR "libgdal and/or development headers not found")
@ -313,16 +357,16 @@ if(WITH_TOOLS OR BUILD_TOOLS)
add_executable(osrm-cli tools/simpleclient.cpp $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:COORDINATE>) add_executable(osrm-cli tools/simpleclient.cpp $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:COORDINATE>)
target_link_libraries(osrm-cli ${Boost_LIBRARIES} ${OPTIONAL_SOCKET_LIBS} OSRM) target_link_libraries(osrm-cli ${Boost_LIBRARIES} ${OPTIONAL_SOCKET_LIBS} OSRM)
target_link_libraries(osrm-cli ${TBB_LIBRARIES}) target_link_libraries(osrm-cli ${TBB_LIBRARIES})
add_executable(osrm-io-benchmark tools/io-benchmark.cpp $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:GITDESCRIPTION> $<TARGET_OBJECTS:LOGGER>) add_executable(osrm-io-benchmark tools/io-benchmark.cpp $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:LOGGER>)
target_link_libraries(osrm-io-benchmark ${Boost_LIBRARIES}) target_link_libraries(osrm-io-benchmark ${Boost_LIBRARIES})
add_executable(osrm-unlock-all tools/unlock_all_mutexes.cpp $<TARGET_OBJECTS:GITDESCRIPTION> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:EXCEPTION>) add_executable(osrm-unlock-all tools/unlock_all_mutexes.cpp $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:EXCEPTION>)
target_link_libraries(osrm-unlock-all ${Boost_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) target_link_libraries(osrm-unlock-all ${Boost_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
if(UNIX AND NOT APPLE) if(UNIX AND NOT APPLE)
target_link_libraries(osrm-unlock-all rt) target_link_libraries(osrm-unlock-all rt)
endif() endif()
add_executable(osrm-check-hsgr tools/check-hsgr.cpp $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:IMPORT>) add_executable(osrm-check-hsgr tools/check-hsgr.cpp $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:IMPORT>)
target_link_libraries(osrm-check-hsgr ${Boost_LIBRARIES} ${TBB_LIBRARIES}) target_link_libraries(osrm-check-hsgr ${Boost_LIBRARIES} ${TBB_LIBRARIES})
add_executable(osrm-springclean tools/springclean.cpp $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:GITDESCRIPTION> $<TARGET_OBJECTS:EXCEPTION>) add_executable(osrm-springclean tools/springclean.cpp $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:EXCEPTION>)
target_link_libraries(osrm-springclean ${Boost_LIBRARIES}) target_link_libraries(osrm-springclean ${Boost_LIBRARIES})
install(TARGETS osrm-cli DESTINATION bin) install(TARGETS osrm-cli DESTINATION bin)
@ -332,7 +376,7 @@ if(WITH_TOOLS OR BUILD_TOOLS)
install(TARGETS osrm-springclean DESTINATION bin) install(TARGETS osrm-springclean DESTINATION bin)
endif() endif()
file(GLOB InstallGlob include/osrm/*.hpp library/osrm.hpp) file(GLOB InstallGlob include/osrm/*.hpp)
file(GLOB VariantGlob third_party/variant/*.hpp) file(GLOB VariantGlob third_party/variant/*.hpp)
# Add RPATH info to executables so that when they are run after being installed # Add RPATH info to executables so that when they are run after being installed
@ -350,6 +394,7 @@ install(TARGETS osrm-prepare DESTINATION bin)
install(TARGETS osrm-datastore DESTINATION bin) install(TARGETS osrm-datastore DESTINATION bin)
install(TARGETS osrm-routed DESTINATION bin) install(TARGETS osrm-routed DESTINATION bin)
install(TARGETS OSRM DESTINATION lib) install(TARGETS OSRM DESTINATION lib)
list(GET Boost_LIBRARIES 1 BOOST_LIBRARY_FIRST) list(GET Boost_LIBRARIES 1 BOOST_LIBRARY_FIRST)
get_filename_component(BOOST_LIBRARY_LISTING "${BOOST_LIBRARY_FIRST}" PATH) get_filename_component(BOOST_LIBRARY_LISTING "${BOOST_LIBRARY_FIRST}" PATH)
set(BOOST_LIBRARY_LISTING "-L${BOOST_LIBRARY_LISTING}") set(BOOST_LIBRARY_LISTING "-L${BOOST_LIBRARY_LISTING}")
@ -358,6 +403,14 @@ foreach(lib ${Boost_LIBRARIES})
string(REPLACE "lib" "" BOOST_LIBRARY_NAME ${BOOST_LIBRARY_NAME}) string(REPLACE "lib" "" BOOST_LIBRARY_NAME ${BOOST_LIBRARY_NAME})
set(BOOST_LIBRARY_LISTING "${BOOST_LIBRARY_LISTING} -l${BOOST_LIBRARY_NAME}") set(BOOST_LIBRARY_LISTING "${BOOST_LIBRARY_LISTING} -l${BOOST_LIBRARY_NAME}")
endforeach() endforeach()
list(GET TBB_LIBRARIES 1 TBB_LIBRARY_FIRST)
get_filename_component(TBB_LIBRARY_LISTING "${TBB_LIBRARY_FIRST}" PATH)
set(TBB_LIBRARY_LISTING "-L${TBB_LIBRARY_LISTING}")
foreach(lib ${TBB_LIBRARIES})
get_filename_component(TBB_LIBRARY_NAME "${lib}" NAME_WE)
string(REPLACE "lib" "" TBB_LIBRARY_NAME ${TBB_LIBRARY_NAME})
set(TBB_LIBRARY_LISTING "${TBB_LIBRARY_LISTING} -l${TBB_LIBRARY_NAME}")
endforeach()
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cmake/pkgconfig.in libosrm.pc @ONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cmake/pkgconfig.in libosrm.pc @ONLY)
install(FILES ${PROJECT_BINARY_DIR}/libosrm.pc DESTINATION lib/pkgconfig) install(FILES ${PROJECT_BINARY_DIR}/libosrm.pc DESTINATION lib/pkgconfig)
@ -366,3 +419,24 @@ if(BUILD_DEBIAN_PACKAGE)
include(CPackDebianConfig) include(CPackDebianConfig)
include(CPack) include(CPack)
endif() endif()
# add a target to generate API documentation with Doxygen
find_package(Doxygen)
if(DOXYGEN_FOUND)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile @ONLY)
add_custom_target(doc
${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generating API documentation with Doxygen" VERBATIM
)
endif()
# prefix compilation with ccache by default if available and on clang or gcc
if(${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang" OR ${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU")
find_program(CCACHE_FOUND ccache)
if(CCACHE_FOUND)
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache)
set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache)
set(ENV{CCACHE_CPP2} "true")
endif()
endif()

43
Doxyfile.in Normal file
View File

@ -0,0 +1,43 @@
PROJECT_NAME = "Project OSRM"
PROJECT_BRIEF = "Open Source Routing Machine"
BUILTIN_STL_SUPPORT = YES
EXTRACT_ALL = YES
EXTRACT_PRIVATE = YES
EXTRACT_PACKAGE = YES
EXTRACT_STATIC = YES
EXTRACT_LOCAL_CLASSES = YES
EXTRACT_ANON_NSPACES = YES
QUIET = YES
INPUT = @CMAKE_CURRENT_SOURCE_DIR@
USE_MDFILE_AS_MAINPAGE = @CMAKE_CURRENT_SOURCE_DIR@/README.md
FILE_PATTERNS = *.h *.hpp *.c *.cc *.cpp *.md
RECURSIVE = YES
EXCLUDE = @CMAKE_CURRENT_SOURCE_DIR@/third_party \
@CMAKE_CURRENT_SOURCE_DIR@/build \
@CMAKE_CURRENT_SOURCE_DIR@/unit_tests \
@CMAKE_CURRENT_SOURCE_DIR@/benchmarks \
@CMAKE_CURRENT_SOURCE_DIR@/features
SOURCE_BROWSER = YES
CLANG_ASSISTED_PARSING = NO
HTML_COLORSTYLE_HUE = 217
HTML_COLORSTYLE_SAT = 71
HTML_COLORSTYLE_GAMMA = 50
GENERATE_TREEVIEW = YES
HAVE_DOT = @DOXYGEN_DOT_FOUND@
CALL_GRAPH = YES
CALLER_GRAPH = YES
DOT_IMAGE_FORMAT = svg
INTERACTIVE_SVG = YES
DOT_GRAPH_MAX_NODES = 500
DOT_TRANSPARENT = YES
DOT_MULTI_TARGETS = YES

View File

@ -49,7 +49,7 @@ constexpr static const float earth_radius = 6372797.560856f;
namespace coordinate_calculation namespace coordinate_calculation
{ {
double great_circle_distance(const int lat1, double haversine_distance(const int lat1,
const int lon1, const int lon1,
const int lat2, const int lat2,
const int lon2) const int lon2)
@ -77,21 +77,21 @@ double great_circle_distance(const int lat1,
return earth_radius * cHarv; return earth_radius * cHarv;
} }
double great_circle_distance(const FixedPointCoordinate &coordinate_1, double haversine_distance(const FixedPointCoordinate &coordinate_1,
const FixedPointCoordinate &coordinate_2) const FixedPointCoordinate &coordinate_2)
{ {
return great_circle_distance(coordinate_1.lat, coordinate_1.lon, coordinate_2.lat, return haversine_distance(coordinate_1.lat, coordinate_1.lon, coordinate_2.lat,
coordinate_2.lon); coordinate_2.lon);
} }
float euclidean_distance(const FixedPointCoordinate &coordinate_1, float great_circle_distance(const FixedPointCoordinate &coordinate_1,
const FixedPointCoordinate &coordinate_2) const FixedPointCoordinate &coordinate_2)
{ {
return euclidean_distance(coordinate_1.lat, coordinate_1.lon, coordinate_2.lat, return great_circle_distance(coordinate_1.lat, coordinate_1.lon, coordinate_2.lat,
coordinate_2.lon); coordinate_2.lon);
} }
float euclidean_distance(const int lat1, float great_circle_distance(const int lat1,
const int lon1, const int lon1,
const int lat2, const int lat2,
const int lon2) const int lon2)
@ -224,7 +224,7 @@ float perpendicular_distance_from_projected_coordinate(
BOOST_ASSERT(nearest_location.is_valid()); BOOST_ASSERT(nearest_location.is_valid());
const float approximate_distance = const float approximate_distance =
euclidean_distance(query_location, nearest_location); great_circle_distance(query_location, nearest_location);
BOOST_ASSERT(0.f <= approximate_distance); BOOST_ASSERT(0.f <= approximate_distance);
return approximate_distance; return approximate_distance;
} }

View File

@ -36,15 +36,15 @@ struct FixedPointCoordinate;
namespace coordinate_calculation namespace coordinate_calculation
{ {
double double
great_circle_distance(const int lat1, const int lon1, const int lat2, const int lon2); haversine_distance(const int lat1, const int lon1, const int lat2, const int lon2);
double great_circle_distance(const FixedPointCoordinate &first_coordinate, double haversine_distance(const FixedPointCoordinate &first_coordinate,
const FixedPointCoordinate &second_coordinate); const FixedPointCoordinate &second_coordinate);
float euclidean_distance(const FixedPointCoordinate &first_coordinate, float great_circle_distance(const FixedPointCoordinate &first_coordinate,
const FixedPointCoordinate &second_coordinate); const FixedPointCoordinate &second_coordinate);
float euclidean_distance(const int lat1, const int lon1, const int lat2, const int lon2); float great_circle_distance(const int lat1, const int lon1, const int lat2, const int lon2);
void lat_or_lon_to_string(const int value, std::string &output); void lat_or_lon_to_string(const int value, std::string &output);

View File

@ -0,0 +1,180 @@
#ifndef GEOSPATIAL_QUERY_HPP
#define GEOSPATIAL_QUERY_HPP
#include "coordinate_calculation.hpp"
#include "../typedefs.h"
#include "../data_structures/phantom_node.hpp"
#include "../util/bearing.hpp"
#include <osrm/coordinate.hpp>
#include <vector>
#include <memory>
#include <algorithm>
// Implements complex queries on top of an RTree and builds PhantomNodes from it.
//
// Only holds a weak reference on the RTree!
template <typename RTreeT> class GeospatialQuery
{
using EdgeData = typename RTreeT::EdgeData;
using CoordinateList = typename RTreeT::CoordinateList;
public:
GeospatialQuery(RTreeT &rtree_, std::shared_ptr<CoordinateList> coordinates_)
: rtree(rtree_), coordinates(coordinates_)
{
}
// Returns nearest PhantomNodes in the given bearing range within max_distance.
// Does not filter by small/big component!
std::vector<PhantomNodeWithDistance>
NearestPhantomNodesInRange(const FixedPointCoordinate &input_coordinate,
const float max_distance,
const int bearing = 0,
const int bearing_range = 180)
{
auto results =
rtree.Nearest(input_coordinate,
[this, bearing, bearing_range, max_distance](const EdgeData &data)
{
return checkSegmentBearing(data, bearing, bearing_range);
},
[max_distance](const std::size_t, const float min_dist)
{
return min_dist > max_distance;
});
return MakePhantomNodes(input_coordinate, results);
}
// Returns max_results nearest PhantomNodes in the given bearing range.
// Does not filter by small/big component!
std::vector<PhantomNodeWithDistance>
NearestPhantomNodes(const FixedPointCoordinate &input_coordinate,
const unsigned max_results,
const int bearing = 0,
const int bearing_range = 180)
{
auto results = rtree.Nearest(input_coordinate,
[this, bearing, bearing_range](const EdgeData &data)
{
return checkSegmentBearing(data, bearing, bearing_range);
},
[max_results](const std::size_t num_results, const float)
{
return num_results >= max_results;
});
return MakePhantomNodes(input_coordinate, results);
}
// Returns the nearest phantom node. If this phantom node is not from a big component
// a second phantom node is return that is the nearest coordinate in a big component.
std::pair<PhantomNode, PhantomNode>
NearestPhantomNodeWithAlternativeFromBigComponent(const FixedPointCoordinate &input_coordinate,
const int bearing = 0,
const int bearing_range = 180)
{
bool has_small_component = false;
bool has_big_component = false;
auto results = rtree.Nearest(
input_coordinate,
[this, bearing, bearing_range, &has_big_component,
&has_small_component](const EdgeData &data)
{
auto use_segment =
(!has_small_component || (!has_big_component && !data.component.is_tiny));
auto use_directions = std::make_pair(use_segment, use_segment);
if (use_segment)
{
use_directions = checkSegmentBearing(data, bearing, bearing_range);
if (use_directions.first || use_directions.second)
{
has_big_component = has_big_component || !data.component.is_tiny;
has_small_component = has_small_component || data.component.is_tiny;
}
}
return use_directions;
},
[&has_big_component](const std::size_t num_results, const float)
{
return num_results > 0 && has_big_component;
});
if (results.size() == 0)
{
return std::make_pair(PhantomNode{}, PhantomNode{});
}
BOOST_ASSERT(results.size() > 0);
return std::make_pair(MakePhantomNode(input_coordinate, results.front()).phantom_node,
MakePhantomNode(input_coordinate, results.back()).phantom_node);
}
private:
std::vector<PhantomNodeWithDistance>
MakePhantomNodes(const FixedPointCoordinate &input_coordinate,
const std::vector<EdgeData> &results) const
{
std::vector<PhantomNodeWithDistance> distance_and_phantoms(results.size());
std::transform(results.begin(), results.end(), distance_and_phantoms.begin(),
[this, &input_coordinate](const EdgeData &data)
{
return MakePhantomNode(input_coordinate, data);
});
return distance_and_phantoms;
}
PhantomNodeWithDistance MakePhantomNode(const FixedPointCoordinate &input_coordinate,
const EdgeData &data) const
{
FixedPointCoordinate point_on_segment;
float ratio;
const auto current_perpendicular_distance = coordinate_calculation::perpendicular_distance(
coordinates->at(data.u), coordinates->at(data.v), input_coordinate, point_on_segment,
ratio);
auto transformed =
PhantomNodeWithDistance { PhantomNode{data, point_on_segment}, current_perpendicular_distance };
ratio = std::min(1.f, std::max(0.f, ratio));
if (SPECIAL_NODEID != transformed.phantom_node.forward_node_id)
{
transformed.phantom_node.forward_weight *= ratio;
}
if (SPECIAL_NODEID != transformed.phantom_node.reverse_node_id)
{
transformed.phantom_node.reverse_weight *= 1.f - ratio;
}
return transformed;
}
std::pair<bool, bool> checkSegmentBearing(const EdgeData &segment,
const float filter_bearing,
const float filter_bearing_range)
{
const float forward_edge_bearing =
coordinate_calculation::bearing(coordinates->at(segment.u), coordinates->at(segment.v));
const float backward_edge_bearing = (forward_edge_bearing + 180) > 360
? (forward_edge_bearing - 180)
: (forward_edge_bearing + 180);
const bool forward_bearing_valid =
bearing::CheckInBounds(forward_edge_bearing, filter_bearing, filter_bearing_range) &&
segment.forward_edge_based_node_id != SPECIAL_NODEID;
const bool backward_bearing_valid =
bearing::CheckInBounds(backward_edge_bearing, filter_bearing, filter_bearing_range) &&
segment.reverse_edge_based_node_id != SPECIAL_NODEID;
return std::make_pair(forward_bearing_valid, backward_bearing_valid);
}
RTreeT &rtree;
const std::shared_ptr<CoordinateList> coordinates;
};
#endif

View File

@ -29,7 +29,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../typedefs.h" #include "../typedefs.h"
#include "../contractor/speed_profile.hpp" #include "../extractor/speed_profile.hpp"
#include "../data_structures/node_based_graph.hpp" #include "../data_structures/node_based_graph.hpp"
#include <memory> #include <memory>

View File

@ -28,14 +28,13 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef OBJECT_ENCODER_HPP #ifndef OBJECT_ENCODER_HPP
#define OBJECT_ENCODER_HPP #define OBJECT_ENCODER_HPP
#include "../util/string_util.hpp"
#include <boost/assert.hpp> #include <boost/assert.hpp>
#include <boost/archive/iterators/base64_from_binary.hpp> #include <boost/archive/iterators/base64_from_binary.hpp>
#include <boost/archive/iterators/binary_from_base64.hpp> #include <boost/archive/iterators/binary_from_base64.hpp>
#include <boost/archive/iterators/transform_width.hpp> #include <boost/archive/iterators/transform_width.hpp>
#include <algorithm> #include <algorithm>
#include <iterator>
#include <string> #include <string>
#include <vector> #include <vector>
@ -66,8 +65,8 @@ struct ObjectEncoder
encoded.resize(sizeof(ObjectT)); encoded.resize(sizeof(ObjectT));
encoded.assign(base64_t(&data[0]), encoded.assign(base64_t(&data[0]),
base64_t(&data[0] + (data.size() - number_of_padded_chars))); base64_t(&data[0] + (data.size() - number_of_padded_chars)));
replaceAll(encoded, "+", "-"); std::replace(begin(encoded), end(encoded), '+', '-');
replaceAll(encoded, "/", "_"); std::replace(begin(encoded), end(encoded), '/', '_');
} }
template <class ObjectT> static void DecodeFromBase64(const std::string &input, ObjectT &object) template <class ObjectT> static void DecodeFromBase64(const std::string &input, ObjectT &object)
@ -75,9 +74,8 @@ struct ObjectEncoder
try try
{ {
std::string encoded(input); std::string encoded(input);
// replace "-" with "+" and "_" with "/" std::replace(begin(encoded), end(encoded), '-', '+');
replaceAll(encoded, "-", "+"); std::replace(begin(encoded), end(encoded), '_', '/');
replaceAll(encoded, "_", "/");
std::copy(binary_t(encoded.begin()), binary_t(encoded.begin() + encoded.length()), std::copy(binary_t(encoded.begin()), binary_t(encoded.begin() + encoded.length()),
reinterpret_cast<char *>(&object)); reinterpret_cast<char *>(&object));

View File

@ -73,6 +73,8 @@ std::vector<NodeID> BruteForceTrip(const NodeIDIterator start,
const std::size_t number_of_locations, const std::size_t number_of_locations,
const DistTableWrapper<EdgeWeight> &dist_table) const DistTableWrapper<EdgeWeight> &dist_table)
{ {
(void)number_of_locations; // unused
const auto component_size = std::distance(start, end); const auto component_size = std::distance(start, end);
std::vector<NodeID> perm(start, end); std::vector<NodeID> perm(start, end);
@ -103,4 +105,4 @@ std::vector<NodeID> BruteForceTrip(const NodeIDIterator start,
} // end namespace trip } // end namespace trip
} // end namespace osrm } // end namespace osrm
#endif // TRIP_BRUTE_FORCE_HPP #endif // TRIP_BRUTE_FORCE_HPP

View File

@ -54,6 +54,7 @@ GetShortestRoundTrip(const NodeID new_loc,
const std::size_t number_of_locations, const std::size_t number_of_locations,
std::vector<NodeID> &route) std::vector<NodeID> &route)
{ {
(void)number_of_locations; // unused
auto min_trip_distance = INVALID_EDGE_WEIGHT; auto min_trip_distance = INVALID_EDGE_WEIGHT;
NodeIDIter next_insert_point_candidate; NodeIDIter next_insert_point_candidate;
@ -76,7 +77,13 @@ GetShortestRoundTrip(const NodeID new_loc,
BOOST_ASSERT_MSG(dist_from != INVALID_EDGE_WEIGHT, "distance has invalid edge weight"); BOOST_ASSERT_MSG(dist_from != INVALID_EDGE_WEIGHT, "distance has invalid edge weight");
BOOST_ASSERT_MSG(dist_to != INVALID_EDGE_WEIGHT, "distance has invalid edge weight"); BOOST_ASSERT_MSG(dist_to != INVALID_EDGE_WEIGHT, "distance has invalid edge weight");
BOOST_ASSERT_MSG(trip_dist >= 0, "previous trip was not minimal. something's wrong"); // This is not neccessarily true:
// Lets say you have an edge (u, v) with duration 100. If you place a coordinate exactly in
// the middle of the segment yielding (u, v'), the adjusted duration will be 100 * 0.5 = 50.
// Now imagine two coordinates. One placed at 0.99 and one at 0.999. This means (u, v') now
// has a duration of 100 * 0.99 = 99, but (u, v'') also has a duration of 100 * 0.995 = 99.
// In which case (v', v'') has a duration of 0.
// BOOST_ASSERT_MSG(trip_dist >= 0, "previous trip was not minimal. something's wrong");
// from all possible insertions to the current trip, choose the shortest of all insertions // from all possible insertions to the current trip, choose the shortest of all insertions
if (trip_dist < min_trip_distance) if (trip_dist < min_trip_distance)
@ -118,7 +125,7 @@ std::vector<NodeID> FindRoute(const std::size_t &number_of_locations,
for (std::size_t j = 2; j < component_size; ++j) for (std::size_t j = 2; j < component_size; ++j)
{ {
auto farthest_distance = 0; auto farthest_distance = std::numeric_limits<int>::min();
auto next_node = -1; auto next_node = -1;
NodeIDIter next_insert_point; NodeIDIter next_insert_point;

View File

@ -6,6 +6,8 @@ ECHO ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ %~f0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
SET PROJECT_DIR=%CD% SET PROJECT_DIR=%CD%
ECHO PROJECT_DIR^: %PROJECT_DIR% ECHO PROJECT_DIR^: %PROJECT_DIR%
ECHO NUMBER_OF_PROCESSORS^: %NUMBER_OF_PROCESSORS%
ECHO cmake^: && cmake --version
ECHO activating VS command prompt ... ECHO activating VS command prompt ...
SET PATH=C:\Program Files (x86)\MSBuild\14.0\Bin;%PATH% SET PATH=C:\Program Files (x86)\MSBuild\14.0\Bin;%PATH%
@ -50,7 +52,7 @@ set TBB_ARCH_PLATFORM=intel64/vc14
ECHO calling cmake .... ECHO calling cmake ....
cmake .. ^ cmake .. ^
-G "Visual Studio 14 Win64" ^ -G "Visual Studio 14 2015 Win64" ^
-DBOOST_ROOT=%BOOST_ROOT% ^ -DBOOST_ROOT=%BOOST_ROOT% ^
-DBoost_ADDITIONAL_VERSIONS=1.58 ^ -DBoost_ADDITIONAL_VERSIONS=1.58 ^
-DBoost_USE_MULTITHREADED=ON ^ -DBoost_USE_MULTITHREADED=ON ^

View File

@ -25,16 +25,16 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#include "../data_structures/original_edge_data.hpp"
#include "../data_structures/query_node.hpp" #include "../data_structures/query_node.hpp"
#include "../data_structures/shared_memory_vector_wrapper.hpp"
#include "../data_structures/static_rtree.hpp" #include "../data_structures/static_rtree.hpp"
#include "../util/boost_filesystem_2_fix.hpp"
#include "../data_structures/edge_based_node.hpp" #include "../data_structures/edge_based_node.hpp"
#include "../algorithms/geospatial_query.hpp"
#include "../util/timing_util.hpp"
#include <osrm/coordinate.hpp> #include <osrm/coordinate.hpp>
#include <random> #include <random>
#include <iostream>
// Choosen by a fair W20 dice roll (this value is completely arbitrary) // Choosen by a fair W20 dice roll (this value is completely arbitrary)
constexpr unsigned RANDOM_SEED = 13; constexpr unsigned RANDOM_SEED = 13;
@ -46,6 +46,7 @@ constexpr int32_t WORLD_MAX_LON = 180 * COORDINATE_PRECISION;
using RTreeLeaf = EdgeBasedNode; using RTreeLeaf = EdgeBasedNode;
using FixedPointCoordinateListPtr = std::shared_ptr<std::vector<FixedPointCoordinate>>; using FixedPointCoordinateListPtr = std::shared_ptr<std::vector<FixedPointCoordinate>>;
using BenchStaticRTree = StaticRTree<RTreeLeaf, ShM<FixedPointCoordinate, false>::vector, false>; using BenchStaticRTree = StaticRTree<RTreeLeaf, ShM<FixedPointCoordinate, false>::vector, false>;
using BenchQuery = GeospatialQuery<BenchStaticRTree>;
FixedPointCoordinateListPtr LoadCoordinates(const boost::filesystem::path &nodes_file) FixedPointCoordinateListPtr LoadCoordinates(const boost::filesystem::path &nodes_file)
{ {
@ -66,7 +67,28 @@ FixedPointCoordinateListPtr LoadCoordinates(const boost::filesystem::path &nodes
return coords; return coords;
} }
void Benchmark(BenchStaticRTree &rtree, unsigned num_queries) template <typename QueryT>
void BenchmarkQuery(const std::vector<FixedPointCoordinate> &queries,
const std::string& name,
QueryT query)
{
std::cout << "Running " << name << " with " << queries.size() << " coordinates: " << std::flush;
TIMER_START(query);
for (const auto &q : queries)
{
auto result = query(q);
}
TIMER_STOP(query);
std::cout << "Took " << TIMER_SEC(query) << " seconds "
<< "(" << TIMER_MSEC(query) << "ms"
<< ") -> " << TIMER_MSEC(query) / queries.size() << " ms/query "
<< "(" << TIMER_MSEC(query) << "ms"
<< ")" << std::endl;
}
void Benchmark(BenchStaticRTree &rtree, BenchQuery &geo_query, unsigned num_queries)
{ {
std::mt19937 mt_rand(RANDOM_SEED); std::mt19937 mt_rand(RANDOM_SEED);
std::uniform_int_distribution<> lat_udist(WORLD_MIN_LAT, WORLD_MAX_LAT); std::uniform_int_distribution<> lat_udist(WORLD_MIN_LAT, WORLD_MAX_LAT);
@ -74,91 +96,36 @@ void Benchmark(BenchStaticRTree &rtree, unsigned num_queries)
std::vector<FixedPointCoordinate> queries; std::vector<FixedPointCoordinate> queries;
for (unsigned i = 0; i < num_queries; i++) for (unsigned i = 0; i < num_queries; i++)
{ {
queries.emplace_back(FixedPointCoordinate(lat_udist(mt_rand), lon_udist(mt_rand))); queries.emplace_back(lat_udist(mt_rand), lon_udist(mt_rand));
} }
{ BenchmarkQuery(queries, "raw RTree queries (1 result)", [&rtree](const FixedPointCoordinate &q)
const unsigned num_results = 5; {
std::cout << "#### IncrementalFindPhantomNodeForCoordinate : " << num_results return rtree.Nearest(q, 1);
<< " phantom nodes" });
<< "\n"; BenchmarkQuery(queries, "raw RTree queries (10 results)",
[&rtree](const FixedPointCoordinate &q)
{
return rtree.Nearest(q, 10);
});
TIMER_START(query_phantom); BenchmarkQuery(queries, "big component alternative queries",
std::vector<PhantomNode> phantom_node_vector; [&geo_query](const FixedPointCoordinate &q)
for (const auto &q : queries) {
{ return geo_query.NearestPhantomNodeWithAlternativeFromBigComponent(q);
phantom_node_vector.clear(); });
rtree.IncrementalFindPhantomNodeForCoordinate(q, phantom_node_vector, 3, num_results); BenchmarkQuery(queries, "max distance 1000", [&geo_query](const FixedPointCoordinate &q)
phantom_node_vector.clear(); {
rtree.IncrementalFindPhantomNodeForCoordinate(q, phantom_node_vector, 17, num_results); return geo_query.NearestPhantomNodesInRange(q, 1000);
} });
TIMER_STOP(query_phantom); BenchmarkQuery(queries, "PhantomNode query (1 result)", [&geo_query](const FixedPointCoordinate &q)
{
std::cout << "Took " << TIMER_MSEC(query_phantom) << " msec for " << num_queries return geo_query.NearestPhantomNodes(q, 1);
<< " queries." });
<< "\n"; BenchmarkQuery(queries, "PhantomNode query (10 result)", [&geo_query](const FixedPointCoordinate &q)
std::cout << TIMER_MSEC(query_phantom) / ((double)num_queries) << " msec/query." {
<< "\n"; return geo_query.NearestPhantomNodes(q, 10);
});
std::cout << "#### LocateClosestEndPointForCoordinate"
<< "\n";
}
TIMER_START(query_endpoint);
FixedPointCoordinate result;
for (const auto &q : queries)
{
rtree.LocateClosestEndPointForCoordinate(q, result, 3);
}
TIMER_STOP(query_endpoint);
std::cout << "Took " << TIMER_MSEC(query_endpoint) << " msec for " << num_queries << " queries."
<< "\n";
std::cout << TIMER_MSEC(query_endpoint) / ((double)num_queries) << " msec/query."
<< "\n";
std::cout << "#### FindPhantomNodeForCoordinate"
<< "\n";
TIMER_START(query_node);
for (const auto &q : queries)
{
PhantomNode phantom;
rtree.FindPhantomNodeForCoordinate(q, phantom, 3);
}
TIMER_STOP(query_node);
std::cout << "Took " << TIMER_MSEC(query_node) << " msec for " << num_queries << " queries."
<< "\n";
std::cout << TIMER_MSEC(query_node) / ((double)num_queries) << " msec/query."
<< "\n";
{
const unsigned num_results = 1;
std::cout << "#### IncrementalFindPhantomNodeForCoordinate : " << num_results
<< " phantom nodes"
<< "\n";
TIMER_START(query_phantom);
std::vector<PhantomNode> phantom_node_vector;
for (const auto &q : queries)
{
phantom_node_vector.clear();
rtree.IncrementalFindPhantomNodeForCoordinate(q, phantom_node_vector, 3, num_results);
phantom_node_vector.clear();
rtree.IncrementalFindPhantomNodeForCoordinate(q, phantom_node_vector, 17, num_results);
}
TIMER_STOP(query_phantom);
std::cout << "Took " << TIMER_MSEC(query_phantom) << " msec for " << num_queries
<< " queries."
<< "\n";
std::cout << TIMER_MSEC(query_phantom) / ((double)num_queries) << " msec/query."
<< "\n";
std::cout << "#### LocateClosestEndPointForCoordinate"
<< "\n";
}
} }
int main(int argc, char **argv) int main(int argc, char **argv)
@ -177,8 +144,9 @@ int main(int argc, char **argv)
auto coords = LoadCoordinates(nodesPath); auto coords = LoadCoordinates(nodesPath);
BenchStaticRTree rtree(ramPath, filePath, coords); BenchStaticRTree rtree(ramPath, filePath, coords);
BenchQuery query(rtree, coords);
Benchmark(rtree, 10000); Benchmark(rtree, query, 10000);
return 0; return 0;
} }

View File

@ -11,7 +11,7 @@ SET CONFIGURATION=Release
FOR /F "tokens=*" %%i in ('git rev-parse --abbrev-ref HEAD') do SET APPVEYOR_REPO_BRANCH=%%i FOR /F "tokens=*" %%i in ('git rev-parse --abbrev-ref HEAD') do SET APPVEYOR_REPO_BRANCH=%%i
ECHO APPVEYOR_REPO_BRANCH^: %APPVEYOR_REPO_BRANCH% ECHO APPVEYOR_REPO_BRANCH^: %APPVEYOR_REPO_BRANCH%
SET PATH=C:\mb\windows-builds-64\tmp-bin\cmake-3.1.0-win32-x86\bin;%PATH% SET PATH=C:\mb\windows-builds-64\tmp-bin\cmake-3.4.0-win32-x86\bin;%PATH%
SET PATH=C:\Program Files\7-Zip;%PATH% SET PATH=C:\Program Files\7-Zip;%PATH%
powershell Set-ExecutionPolicy -Scope CurrentUser -ExecutionPolicy Unrestricted -Force powershell Set-ExecutionPolicy -Scope CurrentUser -ExecutionPolicy Unrestricted -Force

View File

@ -1,6 +1,6 @@
set(OLDFILE ${SOURCE_DIR}/util/fingerprint_impl.hpp) set(OLDFILE ${OUTPUT_DIR}/util/fingerprint_impl.hpp)
set(NEWFILE ${OLDFILE}.tmp) set(NEWFILE ${OLDFILE}.tmp)
set(INFILE ${OLDFILE}.in) set(INFILE ${SOURCE_DIR}/util/fingerprint_impl.hpp.in)
file(MD5 ${SOURCE_DIR}/prepare.cpp MD5PREPARE) file(MD5 ${SOURCE_DIR}/prepare.cpp MD5PREPARE)
file(MD5 ${SOURCE_DIR}/data_structures/static_rtree.hpp MD5RTREE) file(MD5 ${SOURCE_DIR}/data_structures/static_rtree.hpp MD5RTREE)
file(MD5 ${SOURCE_DIR}/util/graph_loader.hpp MD5GRAPH) file(MD5 ${SOURCE_DIR}/util/graph_loader.hpp MD5GRAPH)
@ -13,7 +13,7 @@ file(MD5 ${NEWFILE} MD5NEW)
if (EXISTS ${OLDFILE}) if (EXISTS ${OLDFILE})
file(MD5 ${OLDFILE} MD5OLD) file(MD5 ${OLDFILE} MD5OLD)
if(NOT ${MD5NEW} STREQUAL ${MD5OLD}) if(NOT ${MD5NEW} STREQUAL ${MD5OLD})
file(REMOVE_RECURSE ${OLDFILE}) file(REMOVE_RECURSE ${OLDFILE})
file(RENAME ${NEWFILE} ${OLDFILE}) file(RENAME ${NEWFILE} ${OLDFILE})
else() else()
file(REMOVE_RECURSE ${NEWFILE}) file(REMOVE_RECURSE ${NEWFILE})

View File

@ -1,123 +0,0 @@
# - Returns a version string from Git
#
# These functions force a re-configure on each git commit so that you can
# trust the values of the variables in your build system.
#
# get_git_head_revision(<refspecvar> <hashvar> [<additional arguments to git describe> ...])
#
# Returns the refspec and sha hash of the current head revision
#
# git_describe(<var> [<additional arguments to git describe> ...])
#
# Returns the results of git describe on the source tree, and adjusting
# the output so that it tests false if an error occurs.
#
# git_get_exact_tag(<var> [<additional arguments to git describe> ...])
#
# Returns the results of git describe --exact-match on the source tree,
# and adjusting the output so that it tests false if there was no exact
# matching tag.
#
# Requires CMake 2.6 or newer (uses the 'function' command)
#
# Original Author:
# 2009-2010 Ryan Pavlik <rpavlik@iastate.edu> <abiryan@ryand.net>
# http://academic.cleardefinition.com
# Iowa State University HCI Graduate Program/VRAC
#
# Copyright Iowa State University 2009-2010.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
if(__get_git_revision_description)
return()
endif()
set(__get_git_revision_description YES)
# We must run the following at "include" time, not at function call time,
# to find the path to this module rather than the path to a calling list file
get_filename_component(_gitdescmoddir ${CMAKE_CURRENT_LIST_FILE} PATH)
function(get_git_head_revision _refspecvar _hashvar)
set(GIT_PARENT_DIR "${CMAKE_SOURCE_DIR}")
set(GIT_DIR "${GIT_PARENT_DIR}/.git")
while(NOT EXISTS "${GIT_DIR}") # .git dir not found, search parent directories
set(GIT_PREVIOUS_PARENT "${GIT_PARENT_DIR}")
get_filename_component(GIT_PARENT_DIR ${GIT_PARENT_DIR} PATH)
if(GIT_PARENT_DIR STREQUAL GIT_PREVIOUS_PARENT)
# We have reached the root directory, we are not in git
set(${_refspecvar} "GITDIR-NOTFOUND" PARENT_SCOPE)
set(${_hashvar} "GITDIR-NOTFOUND" PARENT_SCOPE)
return()
endif()
set(GIT_DIR "${GIT_PARENT_DIR}/.git")
endwhile()
set(GIT_DATA "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/git-data")
if(NOT EXISTS "${GIT_DATA}")
file(MAKE_DIRECTORY "${GIT_DATA}")
endif()
if(NOT EXISTS "${GIT_DIR}/HEAD")
return()
endif()
set(HEAD_FILE "${GIT_DATA}/HEAD")
configure_file("${GIT_DIR}/HEAD" "${HEAD_FILE}" COPYONLY)
configure_file("${_gitdescmoddir}/GetGitRevisionDescription.cmake.in"
"${GIT_DATA}/grabRef.cmake"
@ONLY)
include("${GIT_DATA}/grabRef.cmake")
set(${_refspecvar} "${HEAD_REF}" PARENT_SCOPE)
set(${_hashvar} "${HEAD_HASH}" PARENT_SCOPE)
endfunction()
function(git_describe _var)
if(NOT GIT_FOUND)
find_package(Git QUIET)
endif()
get_git_head_revision(refspec hash)
if(NOT GIT_FOUND)
set(${_var} "GIT-NOTFOUND" PARENT_SCOPE)
return()
endif()
if(NOT hash)
set(${_var} "HEAD-HASH-NOTFOUND" PARENT_SCOPE)
return()
endif()
# TODO sanitize
#if((${ARGN}" MATCHES "&&") OR
# (ARGN MATCHES "||") OR
# (ARGN MATCHES "\\;"))
# message("Please report the following error to the project!")
# message(FATAL_ERROR "Looks like someone's doing something nefarious with git_describe! Passed arguments ${ARGN}")
#endif()
#message(STATUS "Arguments to execute_process: ${ARGN}")
execute_process(COMMAND
"${GIT_EXECUTABLE}"
describe
${hash}
${ARGN}
WORKING_DIRECTORY
"${CMAKE_SOURCE_DIR}"
RESULT_VARIABLE
res
OUTPUT_VARIABLE
out
ERROR_QUIET
OUTPUT_STRIP_TRAILING_WHITESPACE)
if(NOT res EQUAL 0)
set(out "${out}-${res}-NOTFOUND")
endif()
set(${_var} "${out}" PARENT_SCOPE)
endfunction()
function(git_get_exact_tag _var)
git_describe(out --exact-match ${ARGN})
set(${_var} "${out}" PARENT_SCOPE)
endfunction()

View File

@ -1,38 +0,0 @@
#
# Internal file for GetGitRevisionDescription.cmake
#
# Requires CMake 2.6 or newer (uses the 'function' command)
#
# Original Author:
# 2009-2010 Ryan Pavlik <rpavlik@iastate.edu> <abiryan@ryand.net>
# http://academic.cleardefinition.com
# Iowa State University HCI Graduate Program/VRAC
#
# Copyright Iowa State University 2009-2010.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
set(HEAD_HASH)
file(READ "@HEAD_FILE@" HEAD_CONTENTS LIMIT 1024)
string(STRIP "${HEAD_CONTENTS}" HEAD_CONTENTS)
if(HEAD_CONTENTS MATCHES "ref")
# named branch
string(REPLACE "ref: " "" HEAD_REF "${HEAD_CONTENTS}")
if(EXISTS "@GIT_DIR@/${HEAD_REF}")
configure_file("@GIT_DIR@/${HEAD_REF}" "@GIT_DATA@/head-ref" COPYONLY)
elseif(EXISTS "@GIT_DIR@/logs/${HEAD_REF}")
configure_file("@GIT_DIR@/logs/${HEAD_REF}" "@GIT_DATA@/head-ref" COPYONLY)
set(HEAD_HASH "${HEAD_REF}")
endif()
else()
# detached HEAD
configure_file("@GIT_DIR@/HEAD" "@GIT_DATA@/head-ref" COPYONLY)
endif()
if(NOT HEAD_HASH)
file(READ "@GIT_DATA@/head-ref" HEAD_HASH LIMIT 1024)
string(STRIP "${HEAD_HASH}" HEAD_HASH)
endif()

View File

@ -1,7 +1,7 @@
INCLUDE (CheckCXXSourceCompiles) INCLUDE (CheckCXXSourceCompiles)
unset(LUABIND_WORKS CACHE) unset(LUABIND_WORKS CACHE)
unset(LUABIND51_WORKS CACHE) unset(LUABIND51_WORKS CACHE)
set (LUABIND_CHECK_SRC "#include \"lua.h\"\n#include <luabind/luabind.hpp>\n int main() { lua_State *myLuaState = luaL_newstate(); luabind::open(myLuaState); return 0;}") set (LUABIND_CHECK_SRC "extern \"C\" {\n#include \"lua.h\"\n#include \"lauxlib.h\"\n}\n#include <luabind/open.hpp>\nint main() { lua_State *x = luaL_newstate(); luabind::open(x); }")
set (CMAKE_TRY_COMPILE_CONFIGURATION ${CMAKE_BUILD_TYPE}) set (CMAKE_TRY_COMPILE_CONFIGURATION ${CMAKE_BUILD_TYPE})
set (CMAKE_REQUIRED_INCLUDES "${Boost_INCLUDE_DIR};${LUABIND_INCLUDE_DIR};${LUA_INCLUDE_DIR}") set (CMAKE_REQUIRED_INCLUDES "${Boost_INCLUDE_DIR};${LUABIND_INCLUDE_DIR};${LUA_INCLUDE_DIR}")
set (CMAKE_REQUIRED_LIBRARIES "${LUABIND_LIBRARY};${LUA_LIBRARY}") set (CMAKE_REQUIRED_LIBRARIES "${LUABIND_LIBRARY};${LUA_LIBRARY}")

View File

@ -1,11 +1,11 @@
prefix=@CMAKE_INSTALL_PREFIX@ prefix=@CMAKE_INSTALL_PREFIX@
includedir=${prefix}/include/osrm includedir=${prefix}/include
libdir=${prefix}/lib libdir=${prefix}/lib
Name: libOSRM Name: libOSRM
Description: Project OSRM library Description: Project OSRM library
Version: @GIT_DESCRIPTION@ Version: v@OSRM_VERSION_MAJOR@.@OSRM_VERSION_MINOR@.@OSRM_VERSION_PATCH@
Requires: Requires:
Libs: -L${libdir} -lOSRM Libs: -L${libdir} -lOSRM
Libs.private: @BOOST_LIBRARY_LISTING@ Libs.private: @BOOST_LIBRARY_LISTING@ @TBB_LIBRARY_LISTING@
Cflags: -I${includedir} Cflags: -I${includedir}

View File

@ -50,6 +50,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <algorithm> #include <algorithm>
#include <limits> #include <limits>
#include <memory>
#include <vector> #include <vector>
class Contractor class Contractor
@ -157,6 +158,15 @@ class Contractor
public: public:
template <class ContainerT> Contractor(int nodes, ContainerT &input_edge_list) template <class ContainerT> Contractor(int nodes, ContainerT &input_edge_list)
: Contractor(nodes, input_edge_list, {}, {})
{
}
template <class ContainerT>
Contractor(int nodes,
ContainerT &input_edge_list,
std::vector<float> &&node_levels_)
: node_levels(std::move(node_levels_))
{ {
std::vector<ContractorEdge> edges; std::vector<ContractorEdge> edges;
edges.reserve(input_edge_list.size() * 2); edges.reserve(input_edge_list.size() * 2);
@ -172,7 +182,8 @@ class Contractor
SimpleLogger().Write(logWARNING) SimpleLogger().Write(logWARNING)
<< "Edge weight large -> " << "Edge weight large -> "
<< static_cast<unsigned int>(std::max(diter->weight, 1)) << " : " << static_cast<unsigned int>(std::max(diter->weight, 1)) << " : "
<< static_cast<unsigned int>(diter->source) << " -> " << static_cast<unsigned int>(diter->target); << static_cast<unsigned int>(diter->source) << " -> "
<< static_cast<unsigned int>(diter->target);
} }
#endif #endif
edges.emplace_back(diter->source, diter->target, edges.emplace_back(diter->source, diter->target,
@ -284,7 +295,7 @@ class Contractor
~Contractor() {} ~Contractor() {}
void Run( double core_factor = 1.0 ) void Run(double core_factor = 1.0)
{ {
// for the preperation we can use a big grain size, which is much faster (probably cache) // for the preperation we can use a big grain size, which is much faster (probably cache)
constexpr size_t InitGrainSize = 100000; constexpr size_t InitGrainSize = 100000;
@ -303,14 +314,14 @@ class Contractor
ThreadDataContainer thread_data_list(number_of_nodes); ThreadDataContainer thread_data_list(number_of_nodes);
NodeID number_of_contracted_nodes = 0; NodeID number_of_contracted_nodes = 0;
std::vector<RemainingNodeData> remaining_nodes(number_of_nodes); std::vector<NodePriorityData> node_data;
std::vector<float> node_priorities(number_of_nodes); std::vector<float> node_priorities;
std::vector<NodePriorityData> node_data(number_of_nodes);
is_core_node.resize(number_of_nodes, false); is_core_node.resize(number_of_nodes, false);
std::vector<RemainingNodeData> remaining_nodes(number_of_nodes);
// initialize priorities in parallel // initialize priorities in parallel
tbb::parallel_for(tbb::blocked_range<int>(0, number_of_nodes, InitGrainSize), tbb::parallel_for(tbb::blocked_range<int>(0, number_of_nodes, InitGrainSize),
[&remaining_nodes](const tbb::blocked_range<int> &range) [this, &remaining_nodes](const tbb::blocked_range<int> &range)
{ {
for (int x = range.begin(), end = range.end(); x != end; ++x) for (int x = range.begin(), end = range.end(); x != end; ++x)
{ {
@ -318,25 +329,44 @@ class Contractor
} }
}); });
std::cout << "initializing elimination PQ ..." << std::flush; bool use_cached_node_priorities = !node_levels.empty();
tbb::parallel_for(tbb::blocked_range<int>(0, number_of_nodes, PQGrainSize), if (use_cached_node_priorities)
[this, &node_priorities, &node_data, &thread_data_list](
const tbb::blocked_range<int> &range)
{
ContractorThreadData *data = thread_data_list.getThreadData();
for (int x = range.begin(), end = range.end(); x != end; ++x)
{
node_priorities[x] =
this->EvaluateNodePriority(data, &node_data[x], x);
}
});
std::cout << "ok" << std::endl << "preprocessing " << number_of_nodes << " nodes ..."
<< std::flush;
bool flushed_contractor = false;
while (number_of_nodes > 2 && number_of_contracted_nodes < static_cast<NodeID>(number_of_nodes * core_factor) )
{ {
if (!flushed_contractor && (number_of_contracted_nodes > static_cast<NodeID>(number_of_nodes * 0.65 * core_factor))) std::cout << "using cached node priorities ..." << std::flush;
node_priorities.swap(node_levels);
std::cout << "ok" << std::endl;
}
else
{
node_data.resize(number_of_nodes);
node_priorities.resize(number_of_nodes);
node_levels.resize(number_of_nodes);
std::cout << "initializing elimination PQ ..." << std::flush;
tbb::parallel_for(tbb::blocked_range<int>(0, number_of_nodes, PQGrainSize),
[this, &node_priorities, &node_data, &thread_data_list](
const tbb::blocked_range<int> &range)
{
ContractorThreadData *data = thread_data_list.getThreadData();
for (int x = range.begin(), end = range.end(); x != end; ++x)
{
node_priorities[x] =
this->EvaluateNodePriority(data, &node_data[x], x);
}
});
std::cout << "ok" << std::endl;
}
BOOST_ASSERT(node_priorities.size() == number_of_nodes);
std::cout << "preprocessing " << number_of_nodes << " nodes ..." << std::flush;
unsigned current_level = 0;
bool flushed_contractor = false;
while (number_of_nodes > 2 &&
number_of_contracted_nodes < static_cast<NodeID>(number_of_nodes * core_factor))
{
if (!flushed_contractor && (number_of_contracted_nodes >
static_cast<NodeID>(number_of_nodes * 0.65 * core_factor)))
{ {
DeallocatingVector<ContractorEdge> new_edge_set; // this one is not explicitely DeallocatingVector<ContractorEdge> new_edge_set; // this one is not explicitely
// cleared since it goes out of // cleared since it goes out of
@ -355,28 +385,32 @@ class Contractor
// remaining graph // remaining graph
std::vector<NodeID> new_node_id_from_orig_id_map(number_of_nodes, UINT_MAX); std::vector<NodeID> new_node_id_from_orig_id_map(number_of_nodes, UINT_MAX);
// build forward and backward renumbering map and remap ids in remaining_nodes and
// Priorities.
for (const auto new_node_id : osrm::irange<std::size_t>(0, remaining_nodes.size())) for (const auto new_node_id : osrm::irange<std::size_t>(0, remaining_nodes.size()))
{ {
auto& node = remaining_nodes[new_node_id];
BOOST_ASSERT(node_priorities.size() > node.id);
new_node_priority[new_node_id] = node_priorities[node.id];
}
// build forward and backward renumbering map and remap ids in remaining_nodes
for (const auto new_node_id : osrm::irange<std::size_t>(0, remaining_nodes.size()))
{
auto& node = remaining_nodes[new_node_id];
// create renumbering maps in both directions // create renumbering maps in both directions
orig_node_id_from_new_node_id_map[new_node_id] = remaining_nodes[new_node_id].id; orig_node_id_from_new_node_id_map[new_node_id] = node.id;
new_node_id_from_orig_id_map[remaining_nodes[new_node_id].id] = new_node_id; new_node_id_from_orig_id_map[node.id] = new_node_id;
new_node_priority[new_node_id] = node.id = new_node_id;
node_priorities[remaining_nodes[new_node_id].id];
remaining_nodes[new_node_id].id = new_node_id;
} }
// walk over all nodes // walk over all nodes
for (const auto i : for (const auto source :
osrm::irange<std::size_t>(0, contractor_graph->GetNumberOfNodes())) osrm::irange<NodeID>(0, contractor_graph->GetNumberOfNodes()))
{ {
const NodeID source = i;
for (auto current_edge : contractor_graph->GetAdjacentEdgeRange(source)) for (auto current_edge : contractor_graph->GetAdjacentEdgeRange(source))
{ {
ContractorGraph::EdgeData &data = ContractorGraph::EdgeData &data =
contractor_graph->GetEdgeData(current_edge); contractor_graph->GetEdgeData(current_edge);
const NodeID target = contractor_graph->GetTarget(current_edge); const NodeID target = contractor_graph->GetTarget(current_edge);
if (SPECIAL_NODEID == new_node_id_from_orig_id_map[i]) if (SPECIAL_NODEID == new_node_id_from_orig_id_map[source])
{ {
external_edge_list.push_back({source, target, data}); external_edge_list.push_back({source, target, data});
} }
@ -411,7 +445,7 @@ class Contractor
contractor_graph.reset(); contractor_graph.reset();
// create new graph // create new graph
std::sort(new_edge_set.begin(), new_edge_set.end()); tbb::parallel_sort(new_edge_set.begin(), new_edge_set.end());
contractor_graph = contractor_graph =
std::make_shared<ContractorGraph>(remaining_nodes.size(), new_edge_set); std::make_shared<ContractorGraph>(remaining_nodes.size(), new_edge_set);
@ -423,14 +457,13 @@ class Contractor
thread_data_list.number_of_nodes = contractor_graph->GetNumberOfNodes(); thread_data_list.number_of_nodes = contractor_graph->GetNumberOfNodes();
} }
const int last = (int)remaining_nodes.size(); tbb::parallel_for(tbb::blocked_range<std::size_t>(0, remaining_nodes.size(), IndependentGrainSize),
tbb::parallel_for(tbb::blocked_range<int>(0, last, IndependentGrainSize),
[this, &node_priorities, &remaining_nodes, &thread_data_list]( [this, &node_priorities, &remaining_nodes, &thread_data_list](
const tbb::blocked_range<int> &range) const tbb::blocked_range<std::size_t> &range)
{ {
ContractorThreadData *data = thread_data_list.getThreadData(); ContractorThreadData *data = thread_data_list.getThreadData();
// determine independent node set // determine independent node set
for (int i = range.begin(), end = range.end(); i != end; ++i) for (auto i = range.begin(), end = range.end(); i != end; ++i)
{ {
const NodeID node = remaining_nodes[i].id; const NodeID node = remaining_nodes[i].id;
remaining_nodes[i].is_independent = remaining_nodes[i].is_independent =
@ -438,17 +471,45 @@ class Contractor
} }
}); });
const auto first = stable_partition(remaining_nodes.begin(), remaining_nodes.end(), // sort all remaining nodes to the beginning of the sequence
const auto begin_independent_nodes = stable_partition(remaining_nodes.begin(), remaining_nodes.end(),
[](RemainingNodeData node_data) [](RemainingNodeData node_data)
{ {
return !node_data.is_independent; return !node_data.is_independent;
}); });
const int first_independent_node = static_cast<int>(first - remaining_nodes.begin()); auto begin_independent_nodes_idx = std::distance(remaining_nodes.begin(), begin_independent_nodes);
auto end_independent_nodes_idx = remaining_nodes.size();
if (!use_cached_node_priorities)
{
// write out contraction level
tbb::parallel_for(
tbb::blocked_range<std::size_t>(begin_independent_nodes_idx, end_independent_nodes_idx, ContractGrainSize),
[this, remaining_nodes, flushed_contractor, current_level](const tbb::blocked_range<std::size_t> &range)
{
if (flushed_contractor)
{
for (int position = range.begin(), end = range.end(); position != end; ++position)
{
const NodeID x = remaining_nodes[position].id;
node_levels[orig_node_id_from_new_node_id_map[x]] = current_level;
}
}
else
{
for (int position = range.begin(), end = range.end(); position != end; ++position)
{
const NodeID x = remaining_nodes[position].id;
node_levels[x] = current_level;
}
}
});
}
// contract independent nodes // contract independent nodes
tbb::parallel_for( tbb::parallel_for(
tbb::blocked_range<int>(first_independent_node, last, ContractGrainSize), tbb::blocked_range<std::size_t>(begin_independent_nodes_idx, end_independent_nodes_idx, ContractGrainSize),
[this, &remaining_nodes, &thread_data_list](const tbb::blocked_range<int> &range) [this, &remaining_nodes, &thread_data_list](const tbb::blocked_range<std::size_t> &range)
{ {
ContractorThreadData *data = thread_data_list.getThreadData(); ContractorThreadData *data = thread_data_list.getThreadData();
for (int position = range.begin(), end = range.end(); position != end; ++position) for (int position = range.begin(), end = range.end(); position != end; ++position)
@ -457,16 +518,9 @@ class Contractor
this->ContractNode<false>(data, x); this->ContractNode<false>(data, x);
} }
}); });
// make sure we really sort each block
tbb::parallel_for( tbb::parallel_for(
thread_data_list.data.range(), tbb::blocked_range<int>(begin_independent_nodes_idx, end_independent_nodes_idx, DeleteGrainSize),
[&](const ThreadDataContainer::EnumerableThreadData::range_type &range)
{
for (auto &data : range)
std::sort(data->inserted_edges.begin(), data->inserted_edges.end());
});
tbb::parallel_for(
tbb::blocked_range<int>(first_independent_node, last, DeleteGrainSize),
[this, &remaining_nodes, &thread_data_list](const tbb::blocked_range<int> &range) [this, &remaining_nodes, &thread_data_list](const tbb::blocked_range<int> &range)
{ {
ContractorThreadData *data = thread_data_list.getThreadData(); ContractorThreadData *data = thread_data_list.getThreadData();
@ -477,6 +531,16 @@ class Contractor
} }
}); });
// make sure we really sort each block
tbb::parallel_for(
thread_data_list.data.range(),
[&](const ThreadDataContainer::EnumerableThreadData::range_type &range)
{
for (auto &data : range)
tbb::parallel_sort(data->inserted_edges.begin(),
data->inserted_edges.end());
});
// insert new edges // insert new edges
for (auto &data : thread_data_list.data) for (auto &data : thread_data_list.data)
{ {
@ -502,23 +566,25 @@ class Contractor
data->inserted_edges.clear(); data->inserted_edges.clear();
} }
tbb::parallel_for( if (!use_cached_node_priorities)
tbb::blocked_range<int>(first_independent_node, last, NeighboursGrainSize), {
[this, &remaining_nodes, &node_priorities, &node_data, &thread_data_list]( tbb::parallel_for(
const tbb::blocked_range<int> &range) tbb::blocked_range<int>(begin_independent_nodes_idx, end_independent_nodes_idx, NeighboursGrainSize),
{ [this, &node_priorities, &remaining_nodes, &node_data, &thread_data_list](
ContractorThreadData *data = thread_data_list.getThreadData(); const tbb::blocked_range<int> &range)
for (int position = range.begin(), end = range.end(); position != end; ++position)
{ {
NodeID x = remaining_nodes[position].id; ContractorThreadData *data = thread_data_list.getThreadData();
this->UpdateNodeNeighbours(node_priorities, node_data, data, x); for (int position = range.begin(), end = range.end(); position != end; ++position)
} {
}); NodeID x = remaining_nodes[position].id;
this->UpdateNodeNeighbours(node_priorities, node_data, data, x);
}
});
}
// remove contracted nodes from the pool // remove contracted nodes from the pool
number_of_contracted_nodes += last - first_independent_node; number_of_contracted_nodes += end_independent_nodes_idx - begin_independent_nodes_idx;
remaining_nodes.resize(first_independent_node); remaining_nodes.resize(begin_independent_nodes_idx);
remaining_nodes.shrink_to_fit();
// unsigned maxdegree = 0; // unsigned maxdegree = 0;
// unsigned avgdegree = 0; // unsigned avgdegree = 0;
// unsigned mindegree = UINT_MAX; // unsigned mindegree = UINT_MAX;
@ -545,16 +611,37 @@ class Contractor
// quad: " << quaddegree; // quad: " << quaddegree;
p.printStatus(number_of_contracted_nodes); p.printStatus(number_of_contracted_nodes);
++current_level;
} }
if (remaining_nodes.size() > 2) if (remaining_nodes.size() > 2)
{ {
// TODO: for small cores a sorted array of core ids might also work good if (orig_node_id_from_new_node_id_map.size() > 0)
for (const auto& node : remaining_nodes) {
{ tbb::parallel_for(
auto orig_id = orig_node_id_from_new_node_id_map[node.id]; tbb::blocked_range<int>(0, remaining_nodes.size(), InitGrainSize),
is_core_node[orig_id] = true; [this, &remaining_nodes](const tbb::blocked_range<int> &range)
} {
for (int x = range.begin(), end = range.end(); x != end; ++x)
{
const auto orig_id = remaining_nodes[x].id;
is_core_node[orig_node_id_from_new_node_id_map[orig_id]] = true;
}
});
}
else
{
tbb::parallel_for(
tbb::blocked_range<int>(0, remaining_nodes.size(), InitGrainSize),
[this, &remaining_nodes](const tbb::blocked_range<int> &range)
{
for (int x = range.begin(), end = range.end(); x != end; ++x)
{
const auto orig_id = remaining_nodes[x].id;
is_core_node[orig_id] = true;
}
});
}
} }
else else
{ {
@ -563,7 +650,8 @@ class Contractor
is_core_node.clear(); is_core_node.clear();
} }
SimpleLogger().Write() << "[core] " << remaining_nodes.size() << " nodes " << contractor_graph->GetNumberOfEdges() << " edges." << std::endl; SimpleLogger().Write() << "[core] " << remaining_nodes.size() << " nodes "
<< contractor_graph->GetNumberOfEdges() << " edges." << std::endl;
thread_data_list.data.clear(); thread_data_list.data.clear();
} }
@ -573,6 +661,11 @@ class Contractor
out_is_core_node.swap(is_core_node); out_is_core_node.swap(is_core_node);
} }
inline void GetNodeLevels(std::vector<float> &out_node_levels)
{
out_node_levels.swap(node_levels);
}
template <class Edge> inline void GetEdges(DeallocatingVector<Edge> &edges) template <class Edge> inline void GetEdges(DeallocatingVector<Edge> &edges)
{ {
Percent p(contractor_graph->GetNumberOfNodes()); Percent p(contractor_graph->GetNumberOfNodes());
@ -982,6 +1075,7 @@ class Contractor
std::shared_ptr<ContractorGraph> contractor_graph; std::shared_ptr<ContractorGraph> contractor_graph;
stxxl::vector<QueryEdge> external_edge_list; stxxl::vector<QueryEdge> external_edge_list;
std::vector<NodeID> orig_node_id_from_new_node_id_map; std::vector<NodeID> orig_node_id_from_new_node_id_map;
std::vector<float> node_levels;
std::vector<bool> is_core_node; std::vector<bool> is_core_node;
XORFastHash fast_hash; XORFastHash fast_hash;
}; };

View File

@ -27,7 +27,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "contractor_options.hpp" #include "contractor_options.hpp"
#include "../util/git_sha.hpp" #include "util/version.hpp"
#include "../util/simple_logger.hpp" #include "../util/simple_logger.hpp"
#include <boost/filesystem.hpp> #include <boost/filesystem.hpp>
@ -48,19 +48,27 @@ ContractorOptions::ParseArguments(int argc, char *argv[], ContractorConfig &cont
// declare a group of options that will be allowed both on command line and in config file // declare a group of options that will be allowed both on command line and in config file
boost::program_options::options_description config_options("Configuration"); boost::program_options::options_description config_options("Configuration");
config_options.add_options()( config_options.add_options()(
"restrictions,r", "profile,p",
boost::program_options::value<boost::filesystem::path>(&contractor_config.restrictions_path), boost::program_options::value<boost::filesystem::path>(&contractor_config.profile_path)
"Restrictions file in .osrm.restrictions format")( ->default_value("profile.lua"),
"profile,p", boost::program_options::value<boost::filesystem::path>(&contractor_config.profile_path)
->default_value("profile.lua"),
"Path to LUA routing profile")( "Path to LUA routing profile")(
"threads,t", boost::program_options::value<unsigned int>(&contractor_config.requested_num_threads) "threads,t",
->default_value(tbb::task_scheduler_init::default_num_threads()), boost::program_options::value<unsigned int>(&contractor_config.requested_num_threads)
->default_value(tbb::task_scheduler_init::default_num_threads()),
"Number of threads to use")( "Number of threads to use")(
"core,k", boost::program_options::value<double>(&contractor_config.core_factor) "core,k", boost::program_options::value<double>(&contractor_config.core_factor)
->default_value(1.0),"Percentage of the graph (in vertices) to contract [0.1]"); ->default_value(1.0),"Percentage of the graph (in vertices) to contract [0..1]")(
"segment-speed-file", boost::program_options::value<std::string>(&contractor_config.segment_speed_lookup_path),
"Lookup file containing nodeA,nodeB,speed data to adjust edge weights")(
"level-cache,o",
boost::program_options::value<bool>(&contractor_config.use_cached_priority)->default_value(false),
"Use .level file to retain the contaction level for each node from the last run.");
#ifdef DEBUG_GEOMETRY
config_options.add_options()(
"debug-geometry", boost::program_options::value<std::string>(&contractor_config.debug_geometry_path)
,"Write out edge-weight debugging geometry data in GeoJSON format to this file");
#endif
// hidden options, will be allowed both on command line and in config file, but will not be // hidden options, will be allowed both on command line and in config file, but will not be
// shown to the user // shown to the user
@ -102,7 +110,7 @@ ContractorOptions::ParseArguments(int argc, char *argv[], ContractorConfig &cont
if (option_variables.count("version")) if (option_variables.count("version"))
{ {
SimpleLogger().Write() << g_GIT_DESCRIPTION; SimpleLogger().Write() << OSRM_VERSION;
return return_code::exit; return return_code::exit;
} }
@ -114,11 +122,6 @@ ContractorOptions::ParseArguments(int argc, char *argv[], ContractorConfig &cont
boost::program_options::notify(option_variables); boost::program_options::notify(option_variables);
if (!option_variables.count("restrictions"))
{
contractor_config.restrictions_path = contractor_config.osrm_input_path.string() + ".restrictions";
}
if (!option_variables.count("input")) if (!option_variables.count("input"))
{ {
SimpleLogger().Write() << "\n" << visible_options; SimpleLogger().Write() << "\n" << visible_options;
@ -130,11 +133,10 @@ ContractorOptions::ParseArguments(int argc, char *argv[], ContractorConfig &cont
void ContractorOptions::GenerateOutputFilesNames(ContractorConfig &contractor_config) void ContractorOptions::GenerateOutputFilesNames(ContractorConfig &contractor_config)
{ {
contractor_config.node_output_path = contractor_config.osrm_input_path.string() + ".nodes"; contractor_config.level_output_path = contractor_config.osrm_input_path.string() + ".level";
contractor_config.core_output_path = contractor_config.osrm_input_path.string() + ".core"; contractor_config.core_output_path = contractor_config.osrm_input_path.string() + ".core";
contractor_config.edge_output_path = contractor_config.osrm_input_path.string() + ".edges";
contractor_config.geometry_output_path = contractor_config.osrm_input_path.string() + ".geometry";
contractor_config.graph_output_path = contractor_config.osrm_input_path.string() + ".hsgr"; contractor_config.graph_output_path = contractor_config.osrm_input_path.string() + ".hsgr";
contractor_config.rtree_nodes_output_path = contractor_config.osrm_input_path.string() + ".ramIndex"; contractor_config.edge_based_graph_path = contractor_config.osrm_input_path.string() + ".ebg";
contractor_config.rtree_leafs_output_path = contractor_config.osrm_input_path.string() + ".fileIndex"; contractor_config.edge_segment_lookup_path = contractor_config.osrm_input_path.string() + ".edge_segment_lookup";
contractor_config.edge_penalty_path = contractor_config.osrm_input_path.string() + ".edge_penalties";
} }

View File

@ -45,16 +45,16 @@ struct ContractorConfig
boost::filesystem::path config_file_path; boost::filesystem::path config_file_path;
boost::filesystem::path osrm_input_path; boost::filesystem::path osrm_input_path;
boost::filesystem::path restrictions_path;
boost::filesystem::path profile_path; boost::filesystem::path profile_path;
std::string node_output_path; std::string level_output_path;
std::string core_output_path; std::string core_output_path;
std::string edge_output_path;
std::string geometry_output_path;
std::string graph_output_path; std::string graph_output_path;
std::string rtree_nodes_output_path; std::string edge_based_graph_path;
std::string rtree_leafs_output_path;
std::string edge_segment_lookup_path;
std::string edge_penalty_path;
bool use_cached_priority;
unsigned requested_num_threads; unsigned requested_num_threads;
@ -63,6 +63,12 @@ struct ContractorConfig
//The remaining vertices form the core of the hierarchy //The remaining vertices form the core of the hierarchy
//(e.g. 0.8 contracts 80 percent of the hierarchy, leaving a core of 20%) //(e.g. 0.8 contracts 80 percent of the hierarchy, leaving a core of 20%)
double core_factor; double core_factor;
std::string segment_speed_lookup_path;
#ifdef DEBUG_GEOMETRY
std::string debug_geometry_path;
#endif
}; };
struct ContractorOptions struct ContractorOptions

View File

@ -26,17 +26,13 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#include "processing_chain.hpp" #include "processing_chain.hpp"
#include "contractor.hpp"
#include "contractor.hpp" #include "contractor.hpp"
#include "../algorithms/graph_compressor.hpp"
#include "../algorithms/tarjan_scc.hpp"
#include "../algorithms/crc32_processor.hpp"
#include "../data_structures/compressed_edge_container.hpp"
#include "../data_structures/deallocating_vector.hpp"
#include "../data_structures/static_rtree.hpp"
#include "../data_structures/restriction_map.hpp"
#include "../util/git_sha.hpp" #include "../data_structures/deallocating_vector.hpp"
#include "../algorithms/crc32_processor.hpp"
#include "../util/graph_loader.hpp" #include "../util/graph_loader.hpp"
#include "../util/integer_range.hpp" #include "../util/integer_range.hpp"
#include "../util/lua_util.hpp" #include "../util/lua_util.hpp"
@ -46,6 +42,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../util/timing_util.hpp" #include "../util/timing_util.hpp"
#include "../typedefs.h" #include "../typedefs.h"
#include <fast-cpp-csv-parser/csv.h>
#include <boost/filesystem/fstream.hpp> #include <boost/filesystem/fstream.hpp>
#include <boost/program_options.hpp> #include <boost/program_options.hpp>
@ -57,6 +55,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <thread> #include <thread>
#include <vector> #include <vector>
#include "../util/debug_geometry.hpp"
Prepare::~Prepare() {} Prepare::~Prepare() {}
int Prepare::Run() int Prepare::Run()
@ -70,58 +70,49 @@ int Prepare::Run()
"changing EdgeBasedEdge type has influence on memory consumption!"); "changing EdgeBasedEdge type has influence on memory consumption!");
#endif #endif
if (config.core_factor > 1.0 || config.core_factor < 0)
{
throw osrm::exception("Core factor must be between 0.0 to 1.0 (inclusive)");
}
TIMER_START(preparing); TIMER_START(preparing);
// Create a new lua state // Create a new lua state
SimpleLogger().Write() << "Generating edge-expanded graph representation"; SimpleLogger().Write() << "Loading edge-expanded graph representation";
TIMER_START(expansion);
std::vector<EdgeBasedNode> node_based_edge_list;
DeallocatingVector<EdgeBasedEdge> edge_based_edge_list; DeallocatingVector<EdgeBasedEdge> edge_based_edge_list;
std::vector<QueryNode> internal_to_external_node_map;
auto graph_size = BuildEdgeExpandedGraph(internal_to_external_node_map, node_based_edge_list,
edge_based_edge_list);
auto number_of_node_based_nodes = graph_size.first; size_t max_edge_id = LoadEdgeExpandedGraph(
auto max_edge_id = graph_size.second; config.edge_based_graph_path, edge_based_edge_list, config.edge_segment_lookup_path,
config.edge_penalty_path, config.segment_speed_lookup_path);
TIMER_STOP(expansion);
SimpleLogger().Write() << "building r-tree ...";
TIMER_START(rtree);
FindComponents(max_edge_id, edge_based_edge_list, node_based_edge_list);
BuildRTree(node_based_edge_list, internal_to_external_node_map);
TIMER_STOP(rtree);
SimpleLogger().Write() << "writing node map ...";
WriteNodeMapping(internal_to_external_node_map);
// Contracting the edge-expanded graph // Contracting the edge-expanded graph
TIMER_START(contraction); TIMER_START(contraction);
std::vector<bool> is_core_node; std::vector<bool> is_core_node;
std::vector<float> node_levels;
if (config.use_cached_priority)
{
ReadNodeLevels(node_levels);
}
DeallocatingVector<QueryEdge> contracted_edge_list; DeallocatingVector<QueryEdge> contracted_edge_list;
ContractGraph(max_edge_id, edge_based_edge_list, contracted_edge_list, is_core_node); ContractGraph(max_edge_id, edge_based_edge_list, contracted_edge_list, is_core_node,
node_levels);
TIMER_STOP(contraction); TIMER_STOP(contraction);
SimpleLogger().Write() << "Contraction took " << TIMER_SEC(contraction) << " sec"; SimpleLogger().Write() << "Contraction took " << TIMER_SEC(contraction) << " sec";
std::size_t number_of_used_edges = std::size_t number_of_used_edges = WriteContractedGraph(max_edge_id, contracted_edge_list);
WriteContractedGraph(max_edge_id, node_based_edge_list, contracted_edge_list);
WriteCoreNodeMarker(std::move(is_core_node)); WriteCoreNodeMarker(std::move(is_core_node));
if (!config.use_cached_priority)
{
WriteNodeLevels(std::move(node_levels));
}
TIMER_STOP(preparing); TIMER_STOP(preparing);
SimpleLogger().Write() << "Preprocessing : " << TIMER_SEC(preparing) << " seconds"; SimpleLogger().Write() << "Preprocessing : " << TIMER_SEC(preparing) << " seconds";
SimpleLogger().Write() << "Expansion : " << (number_of_node_based_nodes / TIMER_SEC(expansion))
<< " nodes/sec and " << ((max_edge_id + 1) / TIMER_SEC(expansion))
<< " edges/sec";
SimpleLogger().Write() << "Contraction: " << ((max_edge_id + 1) / TIMER_SEC(contraction)) SimpleLogger().Write() << "Contraction: " << ((max_edge_id + 1) / TIMER_SEC(contraction))
<< " nodes/sec and " << number_of_used_edges / TIMER_SEC(contraction) << " nodes/sec and " << number_of_used_edges / TIMER_SEC(contraction)
<< " edges/sec"; << " edges/sec";
@ -131,85 +122,179 @@ int Prepare::Run()
return 0; return 0;
} }
void Prepare::FindComponents(unsigned max_edge_id, namespace std
const DeallocatingVector<EdgeBasedEdge> &input_edge_list,
std::vector<EdgeBasedNode> &input_nodes) const
{ {
struct UncontractedEdgeData
template <> struct hash<std::pair<OSMNodeID, OSMNodeID>>
{
std::size_t operator()(const std::pair<OSMNodeID, OSMNodeID> &k) const
{ {
}; return OSMNodeID_to_uint64_t(k.first) ^ (OSMNodeID_to_uint64_t(k.second) << 12);
struct InputEdge }
};
}
std::size_t Prepare::LoadEdgeExpandedGraph(std::string const &edge_based_graph_filename,
DeallocatingVector<EdgeBasedEdge> &edge_based_edge_list,
const std::string &edge_segment_lookup_filename,
const std::string &edge_penalty_filename,
const std::string &segment_speed_filename)
{
SimpleLogger().Write() << "Opening " << edge_based_graph_filename;
boost::filesystem::ifstream input_stream(edge_based_graph_filename, std::ios::binary);
const bool update_edge_weights = segment_speed_filename != "";
boost::filesystem::ifstream edge_segment_input_stream;
boost::filesystem::ifstream edge_fixed_penalties_input_stream;
if (update_edge_weights)
{ {
unsigned source; edge_segment_input_stream.open(edge_segment_lookup_filename, std::ios::binary);
unsigned target; edge_fixed_penalties_input_stream.open(edge_penalty_filename, std::ios::binary);
UncontractedEdgeData data; if (!edge_segment_input_stream || !edge_fixed_penalties_input_stream)
bool operator<(const InputEdge &rhs) const
{ {
return source < rhs.source || (source == rhs.source && target < rhs.target); throw osrm::exception("Could not load .edge_segment_lookup or .edge_penalties, did you "
} "run osrm-extract with '--generate-edge-lookup'?");
bool operator==(const InputEdge &rhs) const
{
return source == rhs.source && target == rhs.target;
}
};
using UncontractedGraph = StaticGraph<UncontractedEdgeData>;
std::vector<InputEdge> edges;
edges.reserve(input_edge_list.size() * 2);
for (const auto &edge : input_edge_list)
{
BOOST_ASSERT_MSG(static_cast<unsigned int>(std::max(edge.weight, 1)) > 0,
"edge distance < 1");
if (edge.forward)
{
edges.push_back({edge.source, edge.target, {}});
}
if (edge.backward)
{
edges.push_back({edge.target, edge.source, {}});
} }
} }
// connect forward and backward nodes of each edge const FingerPrint fingerprint_valid = FingerPrint::GetValid();
for (const auto &node : input_nodes) FingerPrint fingerprint_loaded;
input_stream.read((char *)&fingerprint_loaded, sizeof(FingerPrint));
fingerprint_loaded.TestPrepare(fingerprint_valid);
size_t number_of_edges = 0;
size_t max_edge_id = SPECIAL_EDGEID;
input_stream.read((char *)&number_of_edges, sizeof(size_t));
input_stream.read((char *)&max_edge_id, sizeof(size_t));
edge_based_edge_list.resize(number_of_edges);
SimpleLogger().Write() << "Reading " << number_of_edges << " edges from the edge based graph";
std::unordered_map<std::pair<OSMNodeID, OSMNodeID>, unsigned> segment_speed_lookup;
if (update_edge_weights)
{ {
if (node.reverse_edge_based_node_id != SPECIAL_NODEID) SimpleLogger().Write() << "Segment speed data supplied, will update edge weights from "
<< segment_speed_filename;
io::CSVReader<3> csv_in(segment_speed_filename);
csv_in.set_header("from_node", "to_node", "speed");
uint64_t from_node_id;
uint64_t to_node_id;
unsigned speed;
while (csv_in.read_row(from_node_id, to_node_id, speed))
{ {
edges.push_back({node.forward_edge_based_node_id, node.reverse_edge_based_node_id, {}}); segment_speed_lookup[std::make_pair(OSMNodeID(from_node_id), OSMNodeID(to_node_id))] = speed;
edges.push_back({node.reverse_edge_based_node_id, node.forward_edge_based_node_id, {}});
} }
} }
tbb::parallel_sort(edges.begin(), edges.end()); DEBUG_GEOMETRY_START(config);
auto new_end = std::unique(edges.begin(), edges.end());
edges.resize(new_end - edges.begin());
auto uncontractor_graph = std::make_shared<UncontractedGraph>(max_edge_id + 1, edges); // TODO: can we read this in bulk? DeallocatingVector isn't necessarily
// all stored contiguously
TarjanSCC<UncontractedGraph> component_search( for (; number_of_edges > 0; --number_of_edges)
std::const_pointer_cast<const UncontractedGraph>(uncontractor_graph));
component_search.run();
for (auto &node : input_nodes)
{ {
auto forward_component = component_search.get_component_id(node.forward_edge_based_node_id); EdgeBasedEdge inbuffer;
BOOST_ASSERT(node.reverse_edge_based_node_id == SPECIAL_EDGEID || input_stream.read((char *) &inbuffer, sizeof(EdgeBasedEdge));
forward_component ==
component_search.get_component_id(node.reverse_edge_based_node_id));
const unsigned component_size = component_search.get_component_size(forward_component); if (update_edge_weights)
const bool is_tiny_component = component_size < 1000; {
node.component_id = is_tiny_component ? (1 + forward_component) : 0; // Processing-time edge updates
unsigned fixed_penalty;
edge_fixed_penalties_input_stream.read(reinterpret_cast<char *>(&fixed_penalty),
sizeof(fixed_penalty));
int new_weight = 0;
unsigned num_osm_nodes = 0;
edge_segment_input_stream.read(reinterpret_cast<char *>(&num_osm_nodes),
sizeof(num_osm_nodes));
OSMNodeID previous_osm_node_id;
edge_segment_input_stream.read(reinterpret_cast<char *>(&previous_osm_node_id),
sizeof(previous_osm_node_id));
OSMNodeID this_osm_node_id;
double segment_length;
int segment_weight;
--num_osm_nodes;
for (; num_osm_nodes != 0; --num_osm_nodes)
{
edge_segment_input_stream.read(reinterpret_cast<char *>(&this_osm_node_id),
sizeof(this_osm_node_id));
edge_segment_input_stream.read(reinterpret_cast<char *>(&segment_length),
sizeof(segment_length));
edge_segment_input_stream.read(reinterpret_cast<char *>(&segment_weight),
sizeof(segment_weight));
auto speed_iter = segment_speed_lookup.find(
std::make_pair(previous_osm_node_id, this_osm_node_id));
if (speed_iter != segment_speed_lookup.end())
{
// This sets the segment weight using the same formula as the
// EdgeBasedGraphFactory for consistency. The *why* of this formula
// is lost in the annals of time.
int new_segment_weight =
std::max(1, static_cast<int>(std::floor(
(segment_length * 10.) / (speed_iter->second / 3.6) + .5)));
new_weight += new_segment_weight;
DEBUG_GEOMETRY_EDGE(
new_segment_weight,
segment_length,
previous_osm_node_id,
this_osm_node_id);
}
else
{
// If no lookup found, use the original weight value for this segment
new_weight += segment_weight;
DEBUG_GEOMETRY_EDGE(
segment_weight,
segment_length,
previous_osm_node_id,
this_osm_node_id);
}
previous_osm_node_id = this_osm_node_id;
}
inbuffer.weight = fixed_penalty + new_weight;
}
edge_based_edge_list.emplace_back(std::move(inbuffer));
} }
DEBUG_GEOMETRY_STOP();
SimpleLogger().Write() << "Done reading edges";
return max_edge_id;
}
void Prepare::ReadNodeLevels(std::vector<float> &node_levels) const
{
boost::filesystem::ifstream order_input_stream(config.level_output_path, std::ios::binary);
unsigned level_size;
order_input_stream.read((char *)&level_size, sizeof(unsigned));
node_levels.resize(level_size);
order_input_stream.read((char *)node_levels.data(), sizeof(float) * node_levels.size());
}
void Prepare::WriteNodeLevels(std::vector<float> &&in_node_levels) const
{
std::vector<float> node_levels(std::move(in_node_levels));
boost::filesystem::ofstream order_output_stream(config.level_output_path, std::ios::binary);
unsigned level_size = node_levels.size();
order_output_stream.write((char *)&level_size, sizeof(unsigned));
order_output_stream.write((char *)node_levels.data(), sizeof(float) * node_levels.size());
} }
void Prepare::WriteCoreNodeMarker(std::vector<bool> &&in_is_core_node) const void Prepare::WriteCoreNodeMarker(std::vector<bool> &&in_is_core_node) const
{ {
std::vector<bool> is_core_node(in_is_core_node); std::vector<bool> is_core_node(std::move(in_is_core_node));
std::vector<char> unpacked_bool_flags(is_core_node.size()); std::vector<char> unpacked_bool_flags(std::move(is_core_node.size()));
for (auto i = 0u; i < is_core_node.size(); ++i) for (auto i = 0u; i < is_core_node.size(); ++i)
{ {
unpacked_bool_flags[i] = is_core_node[i] ? 1 : 0; unpacked_bool_flags[i] = is_core_node[i] ? 1 : 0;
@ -224,11 +309,8 @@ void Prepare::WriteCoreNodeMarker(std::vector<bool> &&in_is_core_node) const
} }
std::size_t Prepare::WriteContractedGraph(unsigned max_node_id, std::size_t Prepare::WriteContractedGraph(unsigned max_node_id,
const std::vector<EdgeBasedNode> &node_based_edge_list,
const DeallocatingVector<QueryEdge> &contracted_edge_list) const DeallocatingVector<QueryEdge> &contracted_edge_list)
{ {
const unsigned crc32_value = CalculateEdgeChecksum(node_based_edge_list);
// Sorting contracted edges in a way that the static query graph can read some in in-place. // Sorting contracted edges in a way that the static query graph can read some in in-place.
tbb::parallel_sort(contracted_edge_list.begin(), contracted_edge_list.end()); tbb::parallel_sort(contracted_edge_list.begin(), contracted_edge_list.end());
const unsigned contracted_edge_count = contracted_edge_list.size(); const unsigned contracted_edge_count = contracted_edge_list.size();
@ -284,9 +366,13 @@ std::size_t Prepare::WriteContractedGraph(unsigned max_node_id,
SimpleLogger().Write() << "Serializing node array"; SimpleLogger().Write() << "Serializing node array";
RangebasedCRC32 crc32_calculator;
const unsigned edges_crc32 = crc32_calculator(contracted_edge_list);
SimpleLogger().Write() << "Writing CRC32: " << edges_crc32;
const unsigned node_array_size = node_array.size(); const unsigned node_array_size = node_array.size();
// serialize crc32, aka checksum // serialize crc32, aka checksum
hsgr_output_stream.write((char *)&crc32_value, sizeof(unsigned)); hsgr_output_stream.write((char *)&edges_crc32, sizeof(unsigned));
// serialize number of nodes // serialize number of nodes
hsgr_output_stream.write((char *)&node_array_size, sizeof(unsigned)); hsgr_output_stream.write((char *)&node_array_size, sizeof(unsigned));
// serialize number of edges // serialize number of edges
@ -335,205 +421,21 @@ std::size_t Prepare::WriteContractedGraph(unsigned max_node_id,
return number_of_used_edges; return number_of_used_edges;
} }
unsigned Prepare::CalculateEdgeChecksum(const std::vector<EdgeBasedNode> &node_based_edge_list)
{
RangebasedCRC32 crc32;
if (crc32.using_hardware())
{
SimpleLogger().Write() << "using hardware based CRC32 computation";
}
else
{
SimpleLogger().Write() << "using software based CRC32 computation";
}
const unsigned crc32_value = crc32(node_based_edge_list);
SimpleLogger().Write() << "CRC32: " << crc32_value;
return crc32_value;
}
/**
\brief Setups scripting environment (lua-scripting)
Also initializes speed profile.
*/
void Prepare::SetupScriptingEnvironment(lua_State *lua_state, SpeedProfileProperties &speed_profile)
{
// open utility libraries string library;
luaL_openlibs(lua_state);
// adjust lua load path
luaAddScriptFolderToLoadPath(lua_state, config.profile_path.string().c_str());
// Now call our function in a lua script
if (0 != luaL_dofile(lua_state, config.profile_path.string().c_str()))
{
std::stringstream msg;
msg << lua_tostring(lua_state, -1) << " occured in scripting block";
throw osrm::exception(msg.str());
}
if (0 != luaL_dostring(lua_state, "return traffic_signal_penalty\n"))
{
std::stringstream msg;
msg << lua_tostring(lua_state, -1) << " occured in scripting block";
throw osrm::exception(msg.str());
}
speed_profile.traffic_signal_penalty = 10 * lua_tointeger(lua_state, -1);
SimpleLogger().Write(logDEBUG)
<< "traffic_signal_penalty: " << speed_profile.traffic_signal_penalty;
if (0 != luaL_dostring(lua_state, "return u_turn_penalty\n"))
{
std::stringstream msg;
msg << lua_tostring(lua_state, -1) << " occured in scripting block";
throw osrm::exception(msg.str());
}
speed_profile.u_turn_penalty = 10 * lua_tointeger(lua_state, -1);
speed_profile.has_turn_penalty_function = lua_function_exists(lua_state, "turn_function");
}
/**
\brief Build load restrictions from .restriction file
*/
std::shared_ptr<RestrictionMap> Prepare::LoadRestrictionMap()
{
boost::filesystem::ifstream input_stream(config.restrictions_path,
std::ios::in | std::ios::binary);
std::vector<TurnRestriction> restriction_list;
loadRestrictionsFromFile(input_stream, restriction_list);
SimpleLogger().Write() << " - " << restriction_list.size() << " restrictions.";
return std::make_shared<RestrictionMap>(restriction_list);
}
/**
\brief Load node based graph from .osrm file
*/
std::shared_ptr<NodeBasedDynamicGraph>
Prepare::LoadNodeBasedGraph(std::unordered_set<NodeID> &barrier_nodes,
std::unordered_set<NodeID> &traffic_lights,
std::vector<QueryNode> &internal_to_external_node_map)
{
std::vector<NodeBasedEdge> edge_list;
boost::filesystem::ifstream input_stream(config.osrm_input_path,
std::ios::in | std::ios::binary);
std::vector<NodeID> barrier_list;
std::vector<NodeID> traffic_light_list;
NodeID number_of_node_based_nodes = loadNodesFromFile(
input_stream, barrier_list, traffic_light_list, internal_to_external_node_map);
SimpleLogger().Write() << " - " << barrier_list.size() << " bollard nodes, "
<< traffic_light_list.size() << " traffic lights";
// insert into unordered sets for fast lookup
barrier_nodes.insert(barrier_list.begin(), barrier_list.end());
traffic_lights.insert(traffic_light_list.begin(), traffic_light_list.end());
barrier_list.clear();
barrier_list.shrink_to_fit();
traffic_light_list.clear();
traffic_light_list.shrink_to_fit();
loadEdgesFromFile(input_stream, edge_list);
if (edge_list.empty())
{
SimpleLogger().Write(logWARNING) << "The input data is empty, exiting.";
return std::shared_ptr<NodeBasedDynamicGraph>();
}
return NodeBasedDynamicGraphFromEdges(number_of_node_based_nodes, edge_list);
}
/**
\brief Building an edge-expanded graph from node-based input and turn restrictions
*/
std::pair<std::size_t, std::size_t>
Prepare::BuildEdgeExpandedGraph(std::vector<QueryNode> &internal_to_external_node_map,
std::vector<EdgeBasedNode> &node_based_edge_list,
DeallocatingVector<EdgeBasedEdge> &edge_based_edge_list)
{
lua_State *lua_state = luaL_newstate();
luabind::open(lua_state);
SpeedProfileProperties speed_profile;
SetupScriptingEnvironment(lua_state, speed_profile);
std::unordered_set<NodeID> barrier_nodes;
std::unordered_set<NodeID> traffic_lights;
auto restriction_map = LoadRestrictionMap();
auto node_based_graph =
LoadNodeBasedGraph(barrier_nodes, traffic_lights, internal_to_external_node_map);
CompressedEdgeContainer compressed_edge_container;
GraphCompressor graph_compressor(speed_profile);
graph_compressor.Compress(barrier_nodes, traffic_lights, *restriction_map, *node_based_graph,
compressed_edge_container);
EdgeBasedGraphFactory edge_based_graph_factory(
node_based_graph, compressed_edge_container, barrier_nodes, traffic_lights,
std::const_pointer_cast<RestrictionMap const>(restriction_map),
internal_to_external_node_map, speed_profile);
compressed_edge_container.SerializeInternalVector(config.geometry_output_path);
edge_based_graph_factory.Run(config.edge_output_path, lua_state);
lua_close(lua_state);
edge_based_graph_factory.GetEdgeBasedEdges(edge_based_edge_list);
edge_based_graph_factory.GetEdgeBasedNodes(node_based_edge_list);
auto max_edge_id = edge_based_graph_factory.GetHighestEdgeID();
const std::size_t number_of_node_based_nodes = node_based_graph->GetNumberOfNodes();
return std::make_pair(number_of_node_based_nodes, max_edge_id);
}
/** /**
\brief Build contracted graph. \brief Build contracted graph.
*/ */
void Prepare::ContractGraph(const unsigned max_edge_id, void Prepare::ContractGraph(const unsigned max_edge_id,
DeallocatingVector<EdgeBasedEdge> &edge_based_edge_list, DeallocatingVector<EdgeBasedEdge> &edge_based_edge_list,
DeallocatingVector<QueryEdge> &contracted_edge_list, DeallocatingVector<QueryEdge> &contracted_edge_list,
std::vector<bool> &is_core_node) std::vector<bool> &is_core_node,
std::vector<float> &inout_node_levels) const
{ {
Contractor contractor(max_edge_id + 1, edge_based_edge_list); std::vector<float> node_levels;
node_levels.swap(inout_node_levels);
Contractor contractor(max_edge_id + 1, edge_based_edge_list, std::move(node_levels));
contractor.Run(config.core_factor); contractor.Run(config.core_factor);
contractor.GetEdges(contracted_edge_list); contractor.GetEdges(contracted_edge_list);
contractor.GetCoreMarker(is_core_node); contractor.GetCoreMarker(is_core_node);
} contractor.GetNodeLevels(inout_node_levels);
/**
\brief Writing info on original (node-based) nodes
*/
void Prepare::WriteNodeMapping(const std::vector<QueryNode> &internal_to_external_node_map)
{
boost::filesystem::ofstream node_stream(config.node_output_path, std::ios::binary);
const unsigned size_of_mapping = internal_to_external_node_map.size();
node_stream.write((char *)&size_of_mapping, sizeof(unsigned));
if (size_of_mapping > 0)
{
node_stream.write((char *)internal_to_external_node_map.data(),
size_of_mapping * sizeof(QueryNode));
}
node_stream.close();
}
/**
\brief Building rtree-based nearest-neighbor data structure
Saves tree into '.ramIndex' and leaves into '.fileIndex'.
*/
void Prepare::BuildRTree(const std::vector<EdgeBasedNode> &node_based_edge_list,
const std::vector<QueryNode> &internal_to_external_node_map)
{
StaticRTree<EdgeBasedNode>(node_based_edge_list, config.rtree_nodes_output_path.c_str(),
config.rtree_leafs_output_path.c_str(),
internal_to_external_node_map);
} }

View File

@ -28,10 +28,12 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef PROCESSING_CHAIN_HPP #ifndef PROCESSING_CHAIN_HPP
#define PROCESSING_CHAIN_HPP #define PROCESSING_CHAIN_HPP
#include "contractor.hpp"
#include "contractor_options.hpp" #include "contractor_options.hpp"
#include "edge_based_graph_factory.hpp"
#include "../data_structures/query_edge.hpp" #include "../data_structures/query_edge.hpp"
#include "../data_structures/static_graph.hpp" #include "../data_structures/static_graph.hpp"
#include "../data_structures/deallocating_vector.hpp"
#include "../data_structures/node_based_graph.hpp"
struct SpeedProfileProperties; struct SpeedProfileProperties;
struct EdgeBasedNode; struct EdgeBasedNode;
@ -48,8 +50,6 @@ class Prepare
{ {
public: public:
using EdgeData = QueryEdge::EdgeData; using EdgeData = QueryEdge::EdgeData;
using InputEdge = DynamicGraph<EdgeData>::InputEdge;
using StaticEdge = StaticGraph<EdgeData>::InputEdge;
explicit Prepare(ContractorConfig contractor_config) : config(std::move(contractor_config)) {} explicit Prepare(ContractorConfig contractor_config) : config(std::move(contractor_config)) {}
Prepare(const Prepare &) = delete; Prepare(const Prepare &) = delete;
@ -58,34 +58,27 @@ class Prepare
int Run(); int Run();
protected: protected:
void SetupScriptingEnvironment(lua_State *myLuaState, SpeedProfileProperties &speed_profile);
unsigned CalculateEdgeChecksum(const std::vector<EdgeBasedNode> &node_based_edge_list);
void ContractGraph(const unsigned max_edge_id, void ContractGraph(const unsigned max_edge_id,
DeallocatingVector<EdgeBasedEdge> &edge_based_edge_list, DeallocatingVector<EdgeBasedEdge> &edge_based_edge_list,
DeallocatingVector<QueryEdge> &contracted_edge_list, DeallocatingVector<QueryEdge> &contracted_edge_list,
std::vector<bool> &is_core_node); std::vector<bool> &is_core_node,
std::vector<float> &node_levels) const;
void WriteCoreNodeMarker(std::vector<bool> &&is_core_node) const; void WriteCoreNodeMarker(std::vector<bool> &&is_core_node) const;
void WriteNodeLevels(std::vector<float> &&node_levels) const;
void ReadNodeLevels(std::vector<float> &contraction_order) const;
std::size_t WriteContractedGraph(unsigned number_of_edge_based_nodes, std::size_t WriteContractedGraph(unsigned number_of_edge_based_nodes,
const std::vector<EdgeBasedNode> &node_based_edge_list,
const DeallocatingVector<QueryEdge> &contracted_edge_list); const DeallocatingVector<QueryEdge> &contracted_edge_list);
std::shared_ptr<RestrictionMap> LoadRestrictionMap();
std::shared_ptr<NodeBasedDynamicGraph>
LoadNodeBasedGraph(std::unordered_set<NodeID> &barrier_nodes,
std::unordered_set<NodeID> &traffic_lights,
std::vector<QueryNode> &internal_to_external_node_map);
std::pair<std::size_t, std::size_t>
BuildEdgeExpandedGraph(std::vector<QueryNode> &internal_to_external_node_map,
std::vector<EdgeBasedNode> &node_based_edge_list,
DeallocatingVector<EdgeBasedEdge> &edge_based_edge_list);
void WriteNodeMapping(const std::vector<QueryNode> &internal_to_external_node_map);
void FindComponents(unsigned max_edge_id, void FindComponents(unsigned max_edge_id,
const DeallocatingVector<EdgeBasedEdge> &edges, const DeallocatingVector<EdgeBasedEdge> &edges,
std::vector<EdgeBasedNode> &nodes) const; std::vector<EdgeBasedNode> &nodes) const;
void BuildRTree(const std::vector<EdgeBasedNode> &node_based_edge_list,
const std::vector<QueryNode> &internal_to_external_node_map);
private: private:
ContractorConfig config; ContractorConfig config;
std::size_t LoadEdgeExpandedGraph(const std::string &edge_based_graph_path,
DeallocatingVector<EdgeBasedEdge> &edge_based_edge_list,
const std::string &edge_segment_lookup_path,
const std::string &edge_penalty_path,
const std::string &segment_speed_path);
}; };
#endif // PROCESSING_CHAIN_HPP #endif // PROCESSING_CHAIN_HPP

View File

@ -1,85 +0,0 @@
/*
Copyright (c) 2014, Project OSRM contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CONCURRENT_QUEUE_HPP
#define CONCURRENT_QUEUE_HPP
#include <boost/circular_buffer.hpp>
#include <condition_variable>
#include <mutex>
template <typename Data> class ConcurrentQueue
{
public:
explicit ConcurrentQueue(const size_t max_size) : m_internal_queue(max_size) {}
inline void push(const Data &data)
{
std::unique_lock<std::mutex> lock(m_mutex);
m_not_full.wait(lock, [this]
{
return m_internal_queue.size() < m_internal_queue.capacity();
});
m_internal_queue.push_back(data);
m_not_empty.notify_one();
}
inline bool empty() const { return m_internal_queue.empty(); }
inline void wait_and_pop(Data &popped_value)
{
std::unique_lock<std::mutex> lock(m_mutex);
m_not_empty.wait(lock, [this]
{
return !m_internal_queue.empty();
});
popped_value = m_internal_queue.front();
m_internal_queue.pop_front();
m_not_full.notify_one();
}
inline bool try_pop(Data &popped_value)
{
std::unique_lock<std::mutex> lock(m_mutex);
if (m_internal_queue.empty())
{
return false;
}
popped_value = m_internal_queue.front();
m_internal_queue.pop_front();
m_not_full.notify_one();
return true;
}
private:
boost::circular_buffer<Data> m_internal_queue;
std::mutex m_mutex;
std::condition_variable m_not_empty;
std::condition_variable m_not_full;
};
#endif // CONCURRENT_QUEUE_HPP

View File

@ -237,6 +237,12 @@ class DeallocatingVectorRemoveIterator
} }
}; };
template <typename ElementT, std::size_t ELEMENTS_PER_BLOCK>
class DeallocatingVector;
template<typename T, std::size_t S>
void swap(DeallocatingVector<T, S>& lhs, DeallocatingVector<T, S>& rhs);
template <typename ElementT, std::size_t ELEMENTS_PER_BLOCK = 8388608 / sizeof(ElementT)> template <typename ElementT, std::size_t ELEMENTS_PER_BLOCK = 8388608 / sizeof(ElementT)>
class DeallocatingVector class DeallocatingVector
{ {
@ -257,6 +263,8 @@ class DeallocatingVector
~DeallocatingVector() { clear(); } ~DeallocatingVector() { clear(); }
friend void swap<>(DeallocatingVector<ElementT, ELEMENTS_PER_BLOCK>& lhs, DeallocatingVector<ElementT, ELEMENTS_PER_BLOCK>& rhs);
void swap(DeallocatingVector<ElementT, ELEMENTS_PER_BLOCK> &other) void swap(DeallocatingVector<ElementT, ELEMENTS_PER_BLOCK> &other)
{ {
std::swap(current_size, other.current_size); std::swap(current_size, other.current_size);
@ -386,4 +394,10 @@ class DeallocatingVector
} }
}; };
template<typename T, std::size_t S>
void swap(DeallocatingVector<T, S>& lhs, DeallocatingVector<T, S>& rhs)
{
lhs.swap(rhs);
}
#endif /* DEALLOCATING_VECTOR_HPP */ #endif /* DEALLOCATING_VECTOR_HPP */

View File

@ -46,7 +46,7 @@ struct EdgeBasedNode
u(SPECIAL_NODEID), v(SPECIAL_NODEID), name_id(0), u(SPECIAL_NODEID), v(SPECIAL_NODEID), name_id(0),
forward_weight(INVALID_EDGE_WEIGHT >> 1), reverse_weight(INVALID_EDGE_WEIGHT >> 1), forward_weight(INVALID_EDGE_WEIGHT >> 1), reverse_weight(INVALID_EDGE_WEIGHT >> 1),
forward_offset(0), reverse_offset(0), packed_geometry_id(SPECIAL_EDGEID), forward_offset(0), reverse_offset(0), packed_geometry_id(SPECIAL_EDGEID),
component_id(-1), fwd_segment_position(std::numeric_limits<unsigned short>::max()), component{INVALID_COMPONENTID, false}, fwd_segment_position(std::numeric_limits<unsigned short>::max()),
forward_travel_mode(TRAVEL_MODE_INACCESSIBLE), forward_travel_mode(TRAVEL_MODE_INACCESSIBLE),
backward_travel_mode(TRAVEL_MODE_INACCESSIBLE) backward_travel_mode(TRAVEL_MODE_INACCESSIBLE)
{ {
@ -62,6 +62,7 @@ struct EdgeBasedNode
int forward_offset, int forward_offset,
int reverse_offset, int reverse_offset,
unsigned packed_geometry_id, unsigned packed_geometry_id,
bool is_tiny_component,
unsigned component_id, unsigned component_id,
unsigned short fwd_segment_position, unsigned short fwd_segment_position,
TravelMode forward_travel_mode, TravelMode forward_travel_mode,
@ -70,7 +71,7 @@ struct EdgeBasedNode
reverse_edge_based_node_id(reverse_edge_based_node_id), u(u), v(v), name_id(name_id), reverse_edge_based_node_id(reverse_edge_based_node_id), u(u), v(v), name_id(name_id),
forward_weight(forward_weight), reverse_weight(reverse_weight), forward_weight(forward_weight), reverse_weight(reverse_weight),
forward_offset(forward_offset), reverse_offset(reverse_offset), forward_offset(forward_offset), reverse_offset(reverse_offset),
packed_geometry_id(packed_geometry_id), component_id(component_id), packed_geometry_id(packed_geometry_id), component{component_id, is_tiny_component},
fwd_segment_position(fwd_segment_position), forward_travel_mode(forward_travel_mode), fwd_segment_position(fwd_segment_position), forward_travel_mode(forward_travel_mode),
backward_travel_mode(backward_travel_mode) backward_travel_mode(backward_travel_mode)
{ {
@ -90,8 +91,6 @@ struct EdgeBasedNode
bool IsCompressed() const { return packed_geometry_id != SPECIAL_EDGEID; } bool IsCompressed() const { return packed_geometry_id != SPECIAL_EDGEID; }
bool is_in_tiny_cc() const { return 0 != component_id; }
NodeID forward_edge_based_node_id; // needed for edge-expanded graph NodeID forward_edge_based_node_id; // needed for edge-expanded graph
NodeID reverse_edge_based_node_id; // needed for edge-expanded graph NodeID reverse_edge_based_node_id; // needed for edge-expanded graph
NodeID u; // indices into the coordinates array NodeID u; // indices into the coordinates array
@ -102,7 +101,10 @@ struct EdgeBasedNode
int forward_offset; // prefix sum of the weight up the edge TODO: short must suffice int forward_offset; // prefix sum of the weight up the edge TODO: short must suffice
int reverse_offset; // prefix sum of the weight from the edge TODO: short must suffice int reverse_offset; // prefix sum of the weight from the edge TODO: short must suffice
unsigned packed_geometry_id; // if set, then the edge represents a packed geometry unsigned packed_geometry_id; // if set, then the edge represents a packed geometry
unsigned component_id; struct {
unsigned id : 31;
bool is_tiny : 1;
} component;
unsigned short fwd_segment_position; // segment id in a compressed geometry unsigned short fwd_segment_position; // segment id in a compressed geometry
TravelMode forward_travel_mode : 4; TravelMode forward_travel_mode : 4;
TravelMode backward_travel_mode : 4; TravelMode backward_travel_mode : 4;

View File

@ -31,7 +31,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <limits> #include <limits>
ExternalMemoryNode::ExternalMemoryNode( ExternalMemoryNode::ExternalMemoryNode(
int lat, int lon, unsigned int node_id, bool barrier, bool traffic_lights) int lat, int lon, OSMNodeID node_id, bool barrier, bool traffic_lights)
: QueryNode(lat, lon, node_id), barrier(barrier), traffic_lights(traffic_lights) : QueryNode(lat, lon, node_id), barrier(barrier), traffic_lights(traffic_lights)
{ {
} }
@ -40,13 +40,13 @@ ExternalMemoryNode::ExternalMemoryNode() : barrier(false), traffic_lights(false)
ExternalMemoryNode ExternalMemoryNode::min_value() ExternalMemoryNode ExternalMemoryNode::min_value()
{ {
return ExternalMemoryNode(0, 0, 0, false, false); return ExternalMemoryNode(0, 0, MIN_OSM_NODEID, false, false);
} }
ExternalMemoryNode ExternalMemoryNode::max_value() ExternalMemoryNode ExternalMemoryNode::max_value()
{ {
return ExternalMemoryNode(std::numeric_limits<int>::max(), std::numeric_limits<int>::max(), return ExternalMemoryNode(std::numeric_limits<int>::max(), std::numeric_limits<int>::max(),
std::numeric_limits<unsigned>::max(), false, false); MAX_OSM_NODEID, false, false);
} }
bool ExternalMemoryNodeSTXXLCompare::operator()(const ExternalMemoryNode &left, bool ExternalMemoryNodeSTXXLCompare::operator()(const ExternalMemoryNode &left,

View File

@ -34,7 +34,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
struct ExternalMemoryNode : QueryNode struct ExternalMemoryNode : QueryNode
{ {
ExternalMemoryNode(int lat, int lon, NodeID id, bool barrier, bool traffic_light); ExternalMemoryNode(int lat, int lon, OSMNodeID id, bool barrier, bool traffic_light);
ExternalMemoryNode(); ExternalMemoryNode();

View File

@ -140,7 +140,7 @@ template <class CandidateLists> struct HiddenMarkovModel
for (const auto s : osrm::irange<std::size_t>(0u, viterbi[initial_timestamp].size())) for (const auto s : osrm::irange<std::size_t>(0u, viterbi[initial_timestamp].size()))
{ {
viterbi[initial_timestamp][s] = viterbi[initial_timestamp][s] =
emission_log_probability(candidates_list[initial_timestamp][s].second); emission_log_probability(candidates_list[initial_timestamp][s].distance);
parents[initial_timestamp][s] = std::make_pair(initial_timestamp, s); parents[initial_timestamp][s] = std::make_pair(initial_timestamp, s);
pruned[initial_timestamp][s] = pruned[initial_timestamp][s] =
viterbi[initial_timestamp][s] < osrm::matching::MINIMAL_LOG_PROB; viterbi[initial_timestamp][s] < osrm::matching::MINIMAL_LOG_PROB;

View File

@ -50,7 +50,7 @@ bool NodeBasedEdge::operator<(const NodeBasedEdge &other) const
NodeBasedEdge::NodeBasedEdge() NodeBasedEdge::NodeBasedEdge()
: source(SPECIAL_NODEID), target(SPECIAL_NODEID), name_id(0), weight(0), forward(false), : source(SPECIAL_NODEID), target(SPECIAL_NODEID), name_id(0), weight(0), forward(false),
backward(false), roundabout(false), backward(false), roundabout(false),
access_restricted(false), is_split(false), travel_mode(false) access_restricted(false), startpoint(true), is_split(false), travel_mode(false)
{ {
} }
@ -62,11 +62,12 @@ NodeBasedEdge::NodeBasedEdge(NodeID source,
bool backward, bool backward,
bool roundabout, bool roundabout,
bool access_restricted, bool access_restricted,
bool startpoint,
TravelMode travel_mode, TravelMode travel_mode,
bool is_split) bool is_split)
: source(source), target(target), name_id(name_id), weight(weight), forward(forward), : source(source), target(target), name_id(name_id), weight(weight), forward(forward),
backward(backward), roundabout(roundabout), backward(backward), roundabout(roundabout),
access_restricted(access_restricted), is_split(is_split), travel_mode(travel_mode) access_restricted(access_restricted), startpoint(startpoint), is_split(is_split), travel_mode(travel_mode)
{ {
} }

View File

@ -44,6 +44,7 @@ struct NodeBasedEdge
bool backward, bool backward,
bool roundabout, bool roundabout,
bool access_restricted, bool access_restricted,
bool startpoint,
TravelMode travel_mode, TravelMode travel_mode,
bool is_split); bool is_split);
@ -55,10 +56,31 @@ struct NodeBasedEdge
bool backward : 1; bool backward : 1;
bool roundabout : 1; bool roundabout : 1;
bool access_restricted : 1; bool access_restricted : 1;
bool startpoint : 1;
bool is_split : 1; bool is_split : 1;
TravelMode travel_mode : 4; TravelMode travel_mode : 4;
}; };
struct NodeBasedEdgeWithOSM : NodeBasedEdge
{
explicit NodeBasedEdgeWithOSM(OSMNodeID source,
OSMNodeID target,
NodeID name_id,
EdgeWeight weight,
bool forward,
bool backward,
bool roundabout,
bool access_restricted,
bool startpoint,
TravelMode travel_mode,
bool is_split)
: NodeBasedEdge(SPECIAL_NODEID, SPECIAL_NODEID, name_id, weight, forward, backward, roundabout, access_restricted, startpoint, travel_mode, is_split),
osm_source_id(source), osm_target_id(target) {}
OSMNodeID osm_source_id;
OSMNodeID osm_target_id;
};
struct EdgeBasedEdge struct EdgeBasedEdge
{ {

View File

@ -47,10 +47,10 @@ struct NodeBasedEdgeData
NodeBasedEdgeData(int distance, unsigned edge_id, unsigned name_id, NodeBasedEdgeData(int distance, unsigned edge_id, unsigned name_id,
bool access_restricted, bool reversed, bool access_restricted, bool reversed,
bool roundabout, TravelMode travel_mode) bool roundabout, bool startpoint, TravelMode travel_mode)
: distance(distance), edge_id(edge_id), name_id(name_id), : distance(distance), edge_id(edge_id), name_id(name_id),
access_restricted(access_restricted), reversed(reversed), access_restricted(access_restricted), reversed(reversed),
roundabout(roundabout), travel_mode(travel_mode) roundabout(roundabout), startpoint(startpoint), travel_mode(travel_mode)
{ {
} }
@ -60,6 +60,7 @@ struct NodeBasedEdgeData
bool access_restricted : 1; bool access_restricted : 1;
bool reversed : 1; bool reversed : 1;
bool roundabout : 1; bool roundabout : 1;
bool startpoint : 1;
TravelMode travel_mode : 4; TravelMode travel_mode : 4;
bool IsCompatibleTo(const NodeBasedEdgeData &other) const bool IsCompatibleTo(const NodeBasedEdgeData &other) const
@ -72,10 +73,10 @@ struct NodeBasedEdgeData
using NodeBasedDynamicGraph = DynamicGraph<NodeBasedEdgeData>; using NodeBasedDynamicGraph = DynamicGraph<NodeBasedEdgeData>;
/// Factory method to create NodeBasedDynamicGraph from NodeBasedEdges /// Factory method to create NodeBasedDynamicGraph from NodeBasedEdges
/// The since DynamicGraph expects directed edges, we need to insert /// Since DynamicGraph expects directed edges, we need to insert
/// two edges for undirected edges. /// two edges for undirected edges.
inline std::shared_ptr<NodeBasedDynamicGraph> inline std::shared_ptr<NodeBasedDynamicGraph>
NodeBasedDynamicGraphFromEdges(int number_of_nodes, const std::vector<NodeBasedEdge> &input_edge_list) NodeBasedDynamicGraphFromEdges(std::size_t number_of_nodes, const std::vector<NodeBasedEdge> &input_edge_list)
{ {
auto edges_list = directedEdgesFromCompressed<NodeBasedDynamicGraph::InputEdge>(input_edge_list, auto edges_list = directedEdgesFromCompressed<NodeBasedDynamicGraph::InputEdge>(input_edge_list,
[](NodeBasedDynamicGraph::InputEdge& output_edge, const NodeBasedEdge& input_edge) [](NodeBasedDynamicGraph::InputEdge& output_edge, const NodeBasedEdge& input_edge)
@ -87,6 +88,7 @@ NodeBasedDynamicGraphFromEdges(int number_of_nodes, const std::vector<NodeBasedE
output_edge.data.name_id = input_edge.name_id; output_edge.data.name_id = input_edge.name_id;
output_edge.data.access_restricted = input_edge.access_restricted; output_edge.data.access_restricted = input_edge.access_restricted;
output_edge.data.travel_mode = input_edge.travel_mode; output_edge.data.travel_mode = input_edge.travel_mode;
output_edge.data.startpoint = input_edge.startpoint;
} }
); );

View File

@ -32,10 +32,10 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
struct Cmp struct Cmp
{ {
using value_type = NodeID; using value_type = OSMNodeID;
bool operator()(const NodeID left, const NodeID right) const { return left < right; } bool operator()(const value_type left, const value_type right) const { return left < right; }
value_type max_value() { return 0xffffffff; } value_type max_value() { return MAX_OSM_NODEID; }
value_type min_value() { return 0x0; } value_type min_value() { return MIN_OSM_NODEID; }
}; };
#endif // NODE_ID_HPP #endif // NODE_ID_HPP

View File

@ -42,6 +42,7 @@ PhantomNode::PhantomNode(NodeID forward_node_id,
int forward_offset, int forward_offset,
int reverse_offset, int reverse_offset,
unsigned packed_geometry_id, unsigned packed_geometry_id,
bool is_tiny_component,
unsigned component_id, unsigned component_id,
FixedPointCoordinate &location, FixedPointCoordinate &location,
unsigned short fwd_segment_position, unsigned short fwd_segment_position,
@ -50,7 +51,7 @@ PhantomNode::PhantomNode(NodeID forward_node_id,
: forward_node_id(forward_node_id), reverse_node_id(reverse_node_id), name_id(name_id), : forward_node_id(forward_node_id), reverse_node_id(reverse_node_id), name_id(name_id),
forward_weight(forward_weight), reverse_weight(reverse_weight), forward_weight(forward_weight), reverse_weight(reverse_weight),
forward_offset(forward_offset), reverse_offset(reverse_offset), forward_offset(forward_offset), reverse_offset(reverse_offset),
packed_geometry_id(packed_geometry_id), component_id(component_id), location(location), packed_geometry_id(packed_geometry_id), component{component_id, is_tiny_component}, location(location),
fwd_segment_position(fwd_segment_position), forward_travel_mode(forward_travel_mode), fwd_segment_position(fwd_segment_position), forward_travel_mode(forward_travel_mode),
backward_travel_mode(backward_travel_mode) backward_travel_mode(backward_travel_mode)
{ {
@ -60,7 +61,7 @@ PhantomNode::PhantomNode()
: forward_node_id(SPECIAL_NODEID), reverse_node_id(SPECIAL_NODEID), : forward_node_id(SPECIAL_NODEID), reverse_node_id(SPECIAL_NODEID),
name_id(std::numeric_limits<unsigned>::max()), forward_weight(INVALID_EDGE_WEIGHT), name_id(std::numeric_limits<unsigned>::max()), forward_weight(INVALID_EDGE_WEIGHT),
reverse_weight(INVALID_EDGE_WEIGHT), forward_offset(0), reverse_offset(0), reverse_weight(INVALID_EDGE_WEIGHT), forward_offset(0), reverse_offset(0),
packed_geometry_id(SPECIAL_EDGEID), component_id(std::numeric_limits<unsigned>::max()), packed_geometry_id(SPECIAL_EDGEID), component{INVALID_COMPONENTID, false},
fwd_segment_position(0), forward_travel_mode(TRAVEL_MODE_INACCESSIBLE), fwd_segment_position(0), forward_travel_mode(TRAVEL_MODE_INACCESSIBLE),
backward_travel_mode(TRAVEL_MODE_INACCESSIBLE) backward_travel_mode(TRAVEL_MODE_INACCESSIBLE)
{ {
@ -96,11 +97,9 @@ bool PhantomNode::is_valid(const unsigned number_of_nodes) const
return location.is_valid() && return location.is_valid() &&
((forward_node_id < number_of_nodes) || (reverse_node_id < number_of_nodes)) && ((forward_node_id < number_of_nodes) || (reverse_node_id < number_of_nodes)) &&
((forward_weight != INVALID_EDGE_WEIGHT) || (reverse_weight != INVALID_EDGE_WEIGHT)) && ((forward_weight != INVALID_EDGE_WEIGHT) || (reverse_weight != INVALID_EDGE_WEIGHT)) &&
(name_id != INVALID_NAMEID); (component.id != INVALID_COMPONENTID) && (name_id != INVALID_NAMEID);
} }
bool PhantomNode::is_in_tiny_component() const { return component_id != 0; }
bool PhantomNode::is_valid() const { return location.is_valid() && (name_id != INVALID_NAMEID); } bool PhantomNode::is_valid() const { return location.is_valid() && (name_id != INVALID_NAMEID); }
bool PhantomNode::operator==(const PhantomNode &other) const { return location == other.location; } bool PhantomNode::operator==(const PhantomNode &other) const { return location == other.location; }

View File

@ -47,6 +47,7 @@ struct PhantomNode
int forward_offset, int forward_offset,
int reverse_offset, int reverse_offset,
unsigned packed_geometry_id, unsigned packed_geometry_id,
bool is_tiny_component,
unsigned component_id, unsigned component_id,
FixedPointCoordinate &location, FixedPointCoordinate &location,
unsigned short fwd_segment_position, unsigned short fwd_segment_position,
@ -68,7 +69,9 @@ struct PhantomNode
reverse_offset = other.reverse_offset; reverse_offset = other.reverse_offset;
packed_geometry_id = other.packed_geometry_id; packed_geometry_id = other.packed_geometry_id;
component_id = other.component_id;
component.id = other.component.id;
component.is_tiny = other.component.is_tiny;
location = foot_point; location = foot_point;
fwd_segment_position = other.fwd_segment_position; fwd_segment_position = other.fwd_segment_position;
@ -85,7 +88,14 @@ struct PhantomNode
int forward_offset; int forward_offset;
int reverse_offset; int reverse_offset;
unsigned packed_geometry_id; unsigned packed_geometry_id;
unsigned component_id; struct ComponentType {
uint32_t id : 31;
bool is_tiny : 1;
} component;
// bit-fields are broken on Windows
#ifndef _MSC_VER
static_assert(sizeof(ComponentType) == 4, "ComponentType needs to 4 bytes big");
#endif
FixedPointCoordinate location; FixedPointCoordinate location;
unsigned short fwd_segment_position; unsigned short fwd_segment_position;
// note 4 bits would suffice for each, // note 4 bits would suffice for each,
@ -105,23 +115,19 @@ struct PhantomNode
bool is_valid() const; bool is_valid() const;
bool is_in_tiny_component() const;
bool operator==(const PhantomNode &other) const; bool operator==(const PhantomNode &other) const;
}; };
#ifndef _MSC_VER
static_assert(sizeof(PhantomNode) == 48, "PhantomNode has more padding then expected"); static_assert(sizeof(PhantomNode) == 48, "PhantomNode has more padding then expected");
#endif
using PhantomNodeArray = std::vector<std::vector<PhantomNode>>; using PhantomNodePair = std::pair<PhantomNode, PhantomNode>;
class phantom_node_pair : public std::pair<PhantomNode, PhantomNode> struct PhantomNodeWithDistance
{ {
}; PhantomNode phantom_node;
double distance;
struct PhantomNodeLists
{
std::vector<PhantomNode> source_phantom_list;
std::vector<PhantomNode> target_phantom_list;
}; };
struct PhantomNodes struct PhantomNodes
@ -147,7 +153,7 @@ inline std::ostream &operator<<(std::ostream &out, const PhantomNode &pn)
<< "fwd-o: " << pn.forward_offset << ", " << "fwd-o: " << pn.forward_offset << ", "
<< "rev-o: " << pn.reverse_offset << ", " << "rev-o: " << pn.reverse_offset << ", "
<< "geom: " << pn.packed_geometry_id << ", " << "geom: " << pn.packed_geometry_id << ", "
<< "comp: " << pn.component_id << ", " << "comp: " << pn.component.is_tiny << " / " << pn.component.id << ", "
<< "pos: " << pn.fwd_segment_position << ", " << "pos: " << pn.fwd_segment_position << ", "
<< "loc: " << pn.location; << "loc: " << pn.location;
return out; return out;

View File

@ -38,32 +38,32 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
struct QueryNode struct QueryNode
{ {
using key_type = NodeID; // type of NodeID using key_type = OSMNodeID; // type of NodeID
using value_type = int; // type of lat,lons using value_type = int; // type of lat,lons
explicit QueryNode(int lat, int lon, NodeID node_id) : lat(lat), lon(lon), node_id(node_id) {} explicit QueryNode(int lat, int lon, OSMNodeID node_id) : lat(lat), lon(lon), node_id(node_id) {}
QueryNode() QueryNode()
: lat(std::numeric_limits<int>::max()), lon(std::numeric_limits<int>::max()), : lat(std::numeric_limits<int>::max()), lon(std::numeric_limits<int>::max()),
node_id(std::numeric_limits<unsigned>::max()) node_id(SPECIAL_OSM_NODEID)
{ {
} }
int lat; int lat;
int lon; int lon;
NodeID node_id; OSMNodeID node_id;
static QueryNode min_value() static QueryNode min_value()
{ {
return QueryNode(static_cast<int>(-90 * COORDINATE_PRECISION), return QueryNode(static_cast<int>(-90 * COORDINATE_PRECISION),
static_cast<int>(-180 * COORDINATE_PRECISION), static_cast<int>(-180 * COORDINATE_PRECISION),
std::numeric_limits<NodeID>::min()); MIN_OSM_NODEID);
} }
static QueryNode max_value() static QueryNode max_value()
{ {
return QueryNode(static_cast<int>(90 * COORDINATE_PRECISION), return QueryNode(static_cast<int>(90 * COORDINATE_PRECISION),
static_cast<int>(180 * COORDINATE_PRECISION), static_cast<int>(180 * COORDINATE_PRECISION),
std::numeric_limits<NodeID>::max()); MAX_OSM_NODEID);
} }
value_type operator[](const std::size_t n) const value_type operator[](const std::size_t n) const

View File

@ -33,9 +33,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "shared_memory_vector_wrapper.hpp" #include "shared_memory_vector_wrapper.hpp"
#include <fstream> #include <fstream>
#include <vector>
#include <array> #include <array>
/* /*
* These pre-declarations are needed because parsing C++ is hard * These pre-declarations are needed because parsing C++ is hard
* and otherwise the compiler gets confused. * and otherwise the compiler gets confused.
@ -82,7 +80,8 @@ template <unsigned BLOCK_SIZE, bool USE_SHARED_MEMORY> class RangeTable
} }
// construct table from length vector // construct table from length vector
explicit RangeTable(const std::vector<unsigned> &lengths) template<typename VectorT>
explicit RangeTable(const VectorT &lengths)
{ {
const unsigned number_of_blocks = [&lengths]() const unsigned number_of_blocks = [&lengths]()
{ {

View File

@ -42,8 +42,8 @@ RasterSource::RasterSource(RasterGrid _raster_data,
int _ymin, int _ymin,
int _ymax) int _ymax)
: xstep(calcSize(_xmin, _xmax, _width)), ystep(calcSize(_ymin, _ymax, _height)), : xstep(calcSize(_xmin, _xmax, _width)), ystep(calcSize(_ymin, _ymax, _height)),
raster_data(_raster_data), width(_width), height(_height), xmin(_xmin), xmax(_xmax), raster_data(std::move(_raster_data)), width(_width), height(_height), xmin(_xmin),
ymin(_ymin), ymax(_ymax) xmax(_xmax), ymin(_ymin), ymax(_ymax)
{ {
BOOST_ASSERT(xstep != 0); BOOST_ASSERT(xstep != 0);
BOOST_ASSERT(ystep != 0); BOOST_ASSERT(ystep != 0);

View File

@ -121,35 +121,35 @@ struct RectangleInt2D
switch (d) switch (d)
{ {
case NORTH: case NORTH:
min_dist = coordinate_calculation::euclidean_distance( min_dist = coordinate_calculation::great_circle_distance(
location, FixedPointCoordinate(max_lat, location.lon)); location, FixedPointCoordinate(max_lat, location.lon));
break; break;
case SOUTH: case SOUTH:
min_dist = coordinate_calculation::euclidean_distance( min_dist = coordinate_calculation::great_circle_distance(
location, FixedPointCoordinate(min_lat, location.lon)); location, FixedPointCoordinate(min_lat, location.lon));
break; break;
case WEST: case WEST:
min_dist = coordinate_calculation::euclidean_distance( min_dist = coordinate_calculation::great_circle_distance(
location, FixedPointCoordinate(location.lat, min_lon)); location, FixedPointCoordinate(location.lat, min_lon));
break; break;
case EAST: case EAST:
min_dist = coordinate_calculation::euclidean_distance( min_dist = coordinate_calculation::great_circle_distance(
location, FixedPointCoordinate(location.lat, max_lon)); location, FixedPointCoordinate(location.lat, max_lon));
break; break;
case NORTH_EAST: case NORTH_EAST:
min_dist = coordinate_calculation::euclidean_distance( min_dist = coordinate_calculation::great_circle_distance(
location, FixedPointCoordinate(max_lat, max_lon)); location, FixedPointCoordinate(max_lat, max_lon));
break; break;
case NORTH_WEST: case NORTH_WEST:
min_dist = coordinate_calculation::euclidean_distance( min_dist = coordinate_calculation::great_circle_distance(
location, FixedPointCoordinate(max_lat, min_lon)); location, FixedPointCoordinate(max_lat, min_lon));
break; break;
case SOUTH_EAST: case SOUTH_EAST:
min_dist = coordinate_calculation::euclidean_distance( min_dist = coordinate_calculation::great_circle_distance(
location, FixedPointCoordinate(min_lat, max_lon)); location, FixedPointCoordinate(min_lat, max_lon));
break; break;
case SOUTH_WEST: case SOUTH_WEST:
min_dist = coordinate_calculation::euclidean_distance( min_dist = coordinate_calculation::great_circle_distance(
location, FixedPointCoordinate(min_lat, min_lon)); location, FixedPointCoordinate(min_lat, min_lon));
break; break;
default: default:
@ -170,25 +170,25 @@ struct RectangleInt2D
const FixedPointCoordinate lower_right(min_lat, max_lon); const FixedPointCoordinate lower_right(min_lat, max_lon);
const FixedPointCoordinate lower_left(min_lat, min_lon); const FixedPointCoordinate lower_left(min_lat, min_lon);
min_max_dist = min_max_dist = std::min(
std::min(min_max_dist, min_max_dist,
std::max(coordinate_calculation::euclidean_distance(location, upper_left), std::max(coordinate_calculation::great_circle_distance(location, upper_left),
coordinate_calculation::euclidean_distance(location, upper_right))); coordinate_calculation::great_circle_distance(location, upper_right)));
min_max_dist = std::min(
min_max_dist,
std::max(coordinate_calculation::great_circle_distance(location, upper_right),
coordinate_calculation::great_circle_distance(location, lower_right)));
min_max_dist = min_max_dist =
std::min(min_max_dist, std::min(min_max_dist,
std::max(coordinate_calculation::euclidean_distance(location, upper_right), std::max(coordinate_calculation::great_circle_distance(location, lower_right),
coordinate_calculation::euclidean_distance(location, lower_right))); coordinate_calculation::great_circle_distance(location, lower_left)));
min_max_dist = min_max_dist =
std::min(min_max_dist, std::min(min_max_dist,
std::max(coordinate_calculation::euclidean_distance(location, lower_right), std::max(coordinate_calculation::great_circle_distance(location, lower_left),
coordinate_calculation::euclidean_distance(location, lower_left))); coordinate_calculation::great_circle_distance(location, upper_left)));
min_max_dist =
std::min(min_max_dist,
std::max(coordinate_calculation::euclidean_distance(location, lower_left),
coordinate_calculation::euclidean_distance(location, upper_left)));
return min_max_dist; return min_max_dist;
} }
@ -198,14 +198,6 @@ struct RectangleInt2D
const bool lons_contained = (location.lon >= min_lon) && (location.lon <= max_lon); const bool lons_contained = (location.lon >= min_lon) && (location.lon <= max_lon);
return lats_contained && lons_contained; return lats_contained && lons_contained;
} }
friend std::ostream &operator<<(std::ostream &out, const RectangleInt2D &rect)
{
out << rect.min_lat / COORDINATE_PRECISION << "," << rect.min_lon / COORDINATE_PRECISION
<< " " << rect.max_lat / COORDINATE_PRECISION << ","
<< rect.max_lon / COORDINATE_PRECISION;
return out;
}
}; };
#endif #endif

View File

@ -36,8 +36,8 @@ struct TurnRestriction
{ {
union WayOrNode union WayOrNode
{ {
NodeID node; OSMNodeID_weak node;
EdgeID way; OSMEdgeID_weak way;
}; };
WayOrNode via; WayOrNode via;
WayOrNode from; WayOrNode from;

View File

@ -33,10 +33,16 @@ RestrictionMap::RestrictionMap(const std::vector<TurnRestriction> &restriction_l
// a pair of starting edge and a list of all end nodes // a pair of starting edge and a list of all end nodes
for (auto &restriction : restriction_list) for (auto &restriction : restriction_list)
{ {
// This downcasting is OK because when this is called, the node IDs have been
// renumbered into internal values, which should be well under 2^32
// This will be a problem if we have more than 2^32 actual restrictions
BOOST_ASSERT(restriction.from.node < std::numeric_limits<NodeID>::max());
BOOST_ASSERT(restriction.via.node < std::numeric_limits<NodeID>::max());
m_restriction_start_nodes.insert(restriction.from.node); m_restriction_start_nodes.insert(restriction.from.node);
m_no_turn_via_node_set.insert(restriction.via.node); m_no_turn_via_node_set.insert(restriction.via.node);
RestrictionSource restriction_source = {restriction.from.node, restriction.via.node}; // This explicit downcasting is also OK for the same reason.
RestrictionSource restriction_source = {static_cast<NodeID>(restriction.from.node), static_cast<NodeID>(restriction.via.node)};
std::size_t index; std::size_t index;
auto restriction_iter = m_restriction_map.find(restriction_source); auto restriction_iter = m_restriction_map.find(restriction_source);
@ -62,6 +68,7 @@ RestrictionMap::RestrictionMap(const std::vector<TurnRestriction> &restriction_l
} }
} }
++m_count; ++m_count;
BOOST_ASSERT(restriction.to.node < std::numeric_limits<NodeID>::max());
m_restriction_bucket_list.at(index) m_restriction_bucket_list.at(index)
.emplace_back(restriction.to.node, restriction.flags.is_only); .emplace_back(restriction.to.node, restriction.flags.is_only);
} }

View File

@ -28,6 +28,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <boost/fusion/container/vector.hpp> #include <boost/fusion/container/vector.hpp>
#include <boost/fusion/sequence/intrinsic.hpp> #include <boost/fusion/sequence/intrinsic.hpp>
#include <boost/fusion/include/at_c.hpp> #include <boost/fusion/include/at_c.hpp>
#include <boost/spirit/include/qi.hpp>
#include <osrm/route_parameters.hpp> #include <osrm/route_parameters.hpp>
@ -60,11 +61,9 @@ void RouteParameters::setAlternateRouteFlag(const bool flag) { alternate_route =
void RouteParameters::setUTurn(const bool flag) void RouteParameters::setUTurn(const bool flag)
{ {
uturns.resize(coordinates.size(), uturn_default); // the API grammar should make sure this never happens
if (!uturns.empty()) BOOST_ASSERT(!uturns.empty());
{ uturns.back() = flag;
uturns.back() = flag;
}
} }
void RouteParameters::setAllUTurns(const bool flag) void RouteParameters::setAllUTurns(const bool flag)
@ -117,6 +116,19 @@ void RouteParameters::addTimestamp(const unsigned timestamp)
} }
} }
void RouteParameters::addBearing(
const boost::fusion::vector<int, boost::optional<int>> &received_bearing,
boost::spirit::qi::unused_type /* unused */, bool& pass)
{
pass = false;
const int bearing = boost::fusion::at_c<0>(received_bearing);
const boost::optional<int> range = boost::fusion::at_c<1>(received_bearing);
if (bearing < 0 || bearing > 359) return;
if (range && (*range < 0 || *range > 180)) return;
bearings.emplace_back(std::make_pair(bearing,range));
pass = true;
}
void RouteParameters::setLanguage(const std::string &language_string) void RouteParameters::setLanguage(const std::string &language_string)
{ {
language = language_string; language = language_string;
@ -132,6 +144,31 @@ void RouteParameters::addCoordinate(
coordinates.emplace_back( coordinates.emplace_back(
static_cast<int>(COORDINATE_PRECISION * boost::fusion::at_c<0>(received_coordinates)), static_cast<int>(COORDINATE_PRECISION * boost::fusion::at_c<0>(received_coordinates)),
static_cast<int>(COORDINATE_PRECISION * boost::fusion::at_c<1>(received_coordinates))); static_cast<int>(COORDINATE_PRECISION * boost::fusion::at_c<1>(received_coordinates)));
is_source.push_back(true);
is_destination.push_back(true);
uturns.push_back(uturn_default);
}
void RouteParameters::addDestination(
const boost::fusion::vector<double, double> &received_coordinates)
{
coordinates.emplace_back(
static_cast<int>(COORDINATE_PRECISION * boost::fusion::at_c<0>(received_coordinates)),
static_cast<int>(COORDINATE_PRECISION * boost::fusion::at_c<1>(received_coordinates)));
is_source.push_back(false);
is_destination.push_back(true);
uturns.push_back(uturn_default);
}
void RouteParameters::addSource(
const boost::fusion::vector<double, double> &received_coordinates)
{
coordinates.emplace_back(
static_cast<int>(COORDINATE_PRECISION * boost::fusion::at_c<0>(received_coordinates)),
static_cast<int>(COORDINATE_PRECISION * boost::fusion::at_c<1>(received_coordinates)));
is_source.push_back(true);
is_destination.push_back(false);
uturns.push_back(uturn_default);
} }
void RouteParameters::getCoordinatesFromGeometry(const std::string &geometry_string) void RouteParameters::getCoordinatesFromGeometry(const std::string &geometry_string)
@ -139,3 +176,4 @@ void RouteParameters::getCoordinatesFromGeometry(const std::string &geometry_str
PolylineCompressor pc; PolylineCompressor pc;
coordinates = pc.decode_string(geometry_string); coordinates = pc.decode_string(geometry_string);
} }

View File

@ -43,7 +43,8 @@ struct SegmentInformation
NodeID name_id; NodeID name_id;
EdgeWeight duration; EdgeWeight duration;
float length; float length;
short bearing; // more than enough [0..3600] fits into 12 bits short pre_turn_bearing; // more than enough [0..3600] fits into 12 bits
short post_turn_bearing;
TurnInstruction turn_instruction; TurnInstruction turn_instruction;
TravelMode travel_mode; TravelMode travel_mode;
bool necessary; bool necessary;
@ -58,7 +59,7 @@ struct SegmentInformation
const bool is_via_location, const bool is_via_location,
const TravelMode travel_mode) const TravelMode travel_mode)
: location(std::move(location)), name_id(name_id), duration(duration), length(length), : location(std::move(location)), name_id(name_id), duration(duration), length(length),
bearing(0), turn_instruction(turn_instruction), travel_mode(travel_mode), pre_turn_bearing(0), post_turn_bearing(0), turn_instruction(turn_instruction), travel_mode(travel_mode),
necessary(necessary), is_via_location(is_via_location) necessary(necessary), is_via_location(is_via_location)
{ {
} }
@ -70,7 +71,7 @@ struct SegmentInformation
const TurnInstruction turn_instruction, const TurnInstruction turn_instruction,
const TravelMode travel_mode) const TravelMode travel_mode)
: location(std::move(location)), name_id(name_id), duration(duration), length(length), : location(std::move(location)), name_id(name_id), duration(duration), length(length),
bearing(0), turn_instruction(turn_instruction), travel_mode(travel_mode), pre_turn_bearing(0), post_turn_bearing(0), turn_instruction(turn_instruction), travel_mode(travel_mode),
necessary(turn_instruction != TurnInstruction::NoTurn), is_via_location(false) necessary(turn_instruction != TurnInstruction::NoTurn), is_via_location(false)
{ {
} }

View File

@ -30,19 +30,14 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "deallocating_vector.hpp" #include "deallocating_vector.hpp"
#include "hilbert_value.hpp" #include "hilbert_value.hpp"
#include "phantom_node.hpp"
#include "query_node.hpp"
#include "rectangle.hpp" #include "rectangle.hpp"
#include "shared_memory_factory.hpp" #include "shared_memory_factory.hpp"
#include "shared_memory_vector_wrapper.hpp" #include "shared_memory_vector_wrapper.hpp"
#include "upper_bound.hpp"
#include "../util/floating_point.hpp" #include "../util/bearing.hpp"
#include "../util/integer_range.hpp" #include "../util/integer_range.hpp"
#include "../util/mercator.hpp" #include "../util/mercator.hpp"
#include "../util/osrm_exception.hpp" #include "../util/osrm_exception.hpp"
#include "../util/simple_logger.hpp"
#include "../util/timing_util.hpp"
#include "../typedefs.h" #include "../typedefs.h"
#include <osrm/coordinate.hpp> #include <osrm/coordinate.hpp>
@ -50,7 +45,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <boost/assert.hpp> #include <boost/assert.hpp>
#include <boost/filesystem.hpp> #include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp> #include <boost/filesystem/fstream.hpp>
#include <boost/thread.hpp>
#include <tbb/parallel_for.h> #include <tbb/parallel_for.h>
#include <tbb/parallel_sort.h> #include <tbb/parallel_sort.h>
@ -65,7 +59,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <string> #include <string>
#include <vector> #include <vector>
// Implements a static, i.e. packed, R-tree // Static RTree for serving nearest neighbour queries
template <class EdgeDataT, template <class EdgeDataT,
class CoordinateListT = std::vector<FixedPointCoordinate>, class CoordinateListT = std::vector<FixedPointCoordinate>,
bool UseSharedMemory = false, bool UseSharedMemory = false,
@ -74,198 +68,16 @@ template <class EdgeDataT,
class StaticRTree class StaticRTree
{ {
public: public:
struct RectangleInt2D using Rectangle = RectangleInt2D;
{ using EdgeData = EdgeDataT;
RectangleInt2D() : min_lon(INT_MAX), max_lon(INT_MIN), min_lat(INT_MAX), max_lat(INT_MIN) {} using CoordinateList = CoordinateListT;
int32_t min_lon, max_lon; static constexpr std::size_t MAX_CHECKED_ELEMENTS = 4 * LEAF_NODE_SIZE;
int32_t min_lat, max_lat;
inline void InitializeMBRectangle(const std::array<EdgeDataT, LEAF_NODE_SIZE> &objects,
const uint32_t element_count,
const std::vector<QueryNode> &coordinate_list)
{
for (uint32_t i = 0; i < element_count; ++i)
{
min_lon = std::min(min_lon, std::min(coordinate_list.at(objects[i].u).lon,
coordinate_list.at(objects[i].v).lon));
max_lon = std::max(max_lon, std::max(coordinate_list.at(objects[i].u).lon,
coordinate_list.at(objects[i].v).lon));
min_lat = std::min(min_lat, std::min(coordinate_list.at(objects[i].u).lat,
coordinate_list.at(objects[i].v).lat));
max_lat = std::max(max_lat, std::max(coordinate_list.at(objects[i].u).lat,
coordinate_list.at(objects[i].v).lat));
}
BOOST_ASSERT(min_lat != std::numeric_limits<int>::min());
BOOST_ASSERT(min_lon != std::numeric_limits<int>::min());
BOOST_ASSERT(max_lat != std::numeric_limits<int>::min());
BOOST_ASSERT(max_lon != std::numeric_limits<int>::min());
}
inline void MergeBoundingBoxes(const RectangleInt2D &other)
{
min_lon = std::min(min_lon, other.min_lon);
max_lon = std::max(max_lon, other.max_lon);
min_lat = std::min(min_lat, other.min_lat);
max_lat = std::max(max_lat, other.max_lat);
BOOST_ASSERT(min_lat != std::numeric_limits<int>::min());
BOOST_ASSERT(min_lon != std::numeric_limits<int>::min());
BOOST_ASSERT(max_lat != std::numeric_limits<int>::min());
BOOST_ASSERT(max_lon != std::numeric_limits<int>::min());
}
inline FixedPointCoordinate Centroid() const
{
FixedPointCoordinate centroid;
// The coordinates of the midpoints are given by:
// x = (x1 + x2) /2 and y = (y1 + y2) /2.
centroid.lon = (min_lon + max_lon) / 2;
centroid.lat = (min_lat + max_lat) / 2;
return centroid;
}
inline bool Intersects(const RectangleInt2D &other) const
{
FixedPointCoordinate upper_left(other.max_lat, other.min_lon);
FixedPointCoordinate upper_right(other.max_lat, other.max_lon);
FixedPointCoordinate lower_right(other.min_lat, other.max_lon);
FixedPointCoordinate lower_left(other.min_lat, other.min_lon);
return (Contains(upper_left) || Contains(upper_right) || Contains(lower_right) ||
Contains(lower_left));
}
inline float GetMinDist(const FixedPointCoordinate &location) const
{
const bool is_contained = Contains(location);
if (is_contained)
{
return 0.;
}
enum Direction
{
INVALID = 0,
NORTH = 1,
SOUTH = 2,
EAST = 4,
NORTH_EAST = 5,
SOUTH_EAST = 6,
WEST = 8,
NORTH_WEST = 9,
SOUTH_WEST = 10
};
Direction d = INVALID;
if (location.lat > max_lat)
d = (Direction)(d | NORTH);
else if (location.lat < min_lat)
d = (Direction)(d | SOUTH);
if (location.lon > max_lon)
d = (Direction)(d | EAST);
else if (location.lon < min_lon)
d = (Direction)(d | WEST);
BOOST_ASSERT(d != INVALID);
float min_dist = std::numeric_limits<float>::max();
switch (d)
{
case NORTH:
min_dist = coordinate_calculation::euclidean_distance(
location, FixedPointCoordinate(max_lat, location.lon));
break;
case SOUTH:
min_dist = coordinate_calculation::euclidean_distance(
location, FixedPointCoordinate(min_lat, location.lon));
break;
case WEST:
min_dist = coordinate_calculation::euclidean_distance(
location, FixedPointCoordinate(location.lat, min_lon));
break;
case EAST:
min_dist = coordinate_calculation::euclidean_distance(
location, FixedPointCoordinate(location.lat, max_lon));
break;
case NORTH_EAST:
min_dist = coordinate_calculation::euclidean_distance(
location, FixedPointCoordinate(max_lat, max_lon));
break;
case NORTH_WEST:
min_dist = coordinate_calculation::euclidean_distance(
location, FixedPointCoordinate(max_lat, min_lon));
break;
case SOUTH_EAST:
min_dist = coordinate_calculation::euclidean_distance(
location, FixedPointCoordinate(min_lat, max_lon));
break;
case SOUTH_WEST:
min_dist = coordinate_calculation::euclidean_distance(
location, FixedPointCoordinate(min_lat, min_lon));
break;
default:
break;
}
BOOST_ASSERT(min_dist != std::numeric_limits<float>::max());
return min_dist;
}
inline float GetMinMaxDist(const FixedPointCoordinate &location) const
{
float min_max_dist = std::numeric_limits<float>::max();
// Get minmax distance to each of the four sides
const FixedPointCoordinate upper_left(max_lat, min_lon);
const FixedPointCoordinate upper_right(max_lat, max_lon);
const FixedPointCoordinate lower_right(min_lat, max_lon);
const FixedPointCoordinate lower_left(min_lat, min_lon);
min_max_dist = std::min(
min_max_dist,
std::max(coordinate_calculation::euclidean_distance(location, upper_left),
coordinate_calculation::euclidean_distance(location, upper_right)));
min_max_dist = std::min(
min_max_dist,
std::max(coordinate_calculation::euclidean_distance(location, upper_right),
coordinate_calculation::euclidean_distance(location, lower_right)));
min_max_dist = std::min(
min_max_dist,
std::max(coordinate_calculation::euclidean_distance(location, lower_right),
coordinate_calculation::euclidean_distance(location, lower_left)));
min_max_dist = std::min(
min_max_dist,
std::max(coordinate_calculation::euclidean_distance(location, lower_left),
coordinate_calculation::euclidean_distance(location, upper_left)));
return min_max_dist;
}
inline bool Contains(const FixedPointCoordinate &location) const
{
const bool lats_contained = (location.lat >= min_lat) && (location.lat <= max_lat);
const bool lons_contained = (location.lon >= min_lon) && (location.lon <= max_lon);
return lats_contained && lons_contained;
}
inline friend std::ostream &operator<<(std::ostream &out, const RectangleInt2D &rect)
{
out << rect.min_lat / COORDINATE_PRECISION << "," << rect.min_lon / COORDINATE_PRECISION
<< " " << rect.max_lat / COORDINATE_PRECISION << ","
<< rect.max_lon / COORDINATE_PRECISION;
return out;
}
};
using RectangleT = RectangleInt2D;
struct TreeNode struct TreeNode
{ {
TreeNode() : child_count(0), child_is_on_disk(false) {} TreeNode() : child_count(0), child_is_on_disk(false) {}
RectangleT minimum_bounding_rectangle; Rectangle minimum_bounding_rectangle;
uint32_t child_count : 31; uint32_t child_count : 31;
bool child_is_on_disk : 1; bool child_is_on_disk : 1;
uint32_t children[BRANCHING_FACTOR]; uint32_t children[BRANCHING_FACTOR];
@ -297,40 +109,17 @@ class StaticRTree
std::array<EdgeDataT, LEAF_NODE_SIZE> objects; std::array<EdgeDataT, LEAF_NODE_SIZE> objects;
}; };
using QueryNodeType = mapbox::util::variant<TreeNode, EdgeDataT>;
struct QueryCandidate struct QueryCandidate
{ {
explicit QueryCandidate(const float dist, const uint32_t n_id)
: min_dist(dist), node_id(n_id)
{
}
QueryCandidate() : min_dist(std::numeric_limits<float>::max()), node_id(UINT_MAX) {}
float min_dist;
uint32_t node_id;
inline bool operator<(const QueryCandidate &other) const inline bool operator<(const QueryCandidate &other) const
{ {
// Attn: this is reversed order. std::pq is a max pq! // Attn: this is reversed order. std::pq is a max pq!
return other.min_dist < min_dist; return other.min_dist < min_dist;
} }
};
using IncrementalQueryNodeType = mapbox::util::variant<TreeNode, EdgeDataT>;
struct IncrementalQueryCandidate
{
explicit IncrementalQueryCandidate(const float dist, IncrementalQueryNodeType node)
: min_dist(dist), node(std::move(node))
{
}
IncrementalQueryCandidate() : min_dist(std::numeric_limits<float>::max()) {}
inline bool operator<(const IncrementalQueryCandidate &other) const
{
// Attn: this is reversed order. std::pq is a max pq!
return other.min_dist < min_dist;
}
float min_dist; float min_dist;
IncrementalQueryNodeType node; QueryNodeType node;
}; };
typename ShM<TreeNode, UseSharedMemory>::vector m_search_tree; typename ShM<TreeNode, UseSharedMemory>::vector m_search_tree;
@ -343,18 +132,14 @@ class StaticRTree
StaticRTree() = delete; StaticRTree() = delete;
StaticRTree(const StaticRTree &) = delete; StaticRTree(const StaticRTree &) = delete;
template <typename CoordinateT>
// Construct a packed Hilbert-R-Tree with Kamel-Faloutsos algorithm [1] // Construct a packed Hilbert-R-Tree with Kamel-Faloutsos algorithm [1]
explicit StaticRTree(const std::vector<EdgeDataT> &input_data_vector, explicit StaticRTree(const std::vector<EdgeDataT> &input_data_vector,
const std::string &tree_node_filename, const std::string &tree_node_filename,
const std::string &leaf_node_filename, const std::string &leaf_node_filename,
const std::vector<QueryNode> &coordinate_list) const std::vector<CoordinateT> &coordinate_list)
: m_element_count(input_data_vector.size()), m_leaf_node_filename(leaf_node_filename) : m_element_count(input_data_vector.size()), m_leaf_node_filename(leaf_node_filename)
{ {
SimpleLogger().Write() << "constructing r-tree of " << m_element_count
<< " edge elements build on-top of " << coordinate_list.size()
<< " coordinates";
TIMER_START(construction);
std::vector<WrappedInputElement> input_wrapper_vector(m_element_count); std::vector<WrappedInputElement> input_wrapper_vector(m_element_count);
HilbertCode get_hilbert_number; HilbertCode get_hilbert_number;
@ -362,8 +147,8 @@ class StaticRTree
// generate auxiliary vector of hilbert-values // generate auxiliary vector of hilbert-values
tbb::parallel_for( tbb::parallel_for(
tbb::blocked_range<uint64_t>(0, m_element_count), tbb::blocked_range<uint64_t>(0, m_element_count),
[&input_data_vector, &input_wrapper_vector, &get_hilbert_number, &coordinate_list]( [&input_data_vector, &input_wrapper_vector, &get_hilbert_number,
const tbb::blocked_range<uint64_t> &range) &coordinate_list](const tbb::blocked_range<uint64_t> &range)
{ {
for (uint64_t element_counter = range.begin(), end = range.end(); for (uint64_t element_counter = range.begin(), end = range.end();
element_counter != end; ++element_counter) element_counter != end; ++element_counter)
@ -402,8 +187,6 @@ class StaticRTree
LeafNode current_leaf; LeafNode current_leaf;
TreeNode current_node; TreeNode current_node;
// SimpleLogger().Write() << "reading " << tree_size << " tree nodes in " <<
// (sizeof(TreeNode)*tree_size) << " bytes";
for (uint32_t current_element_index = 0; LEAF_NODE_SIZE > current_element_index; for (uint32_t current_element_index = 0; LEAF_NODE_SIZE > current_element_index;
++current_element_index) ++current_element_index)
{ {
@ -497,13 +280,8 @@ class StaticRTree
tree_node_file.write((char *)&m_search_tree[0], sizeof(TreeNode) * size_of_tree); tree_node_file.write((char *)&m_search_tree[0], sizeof(TreeNode) * size_of_tree);
// close tree node file. // close tree node file.
tree_node_file.close(); tree_node_file.close();
TIMER_STOP(construction);
SimpleLogger().Write() << "finished r-tree construction in " << TIMER_SEC(construction)
<< " seconds";
} }
// Read-only operation for queries
explicit StaticRTree(const boost::filesystem::path &node_file, explicit StaticRTree(const boost::filesystem::path &node_file,
const boost::filesystem::path &leaf_file, const boost::filesystem::path &leaf_file,
const std::shared_ptr<CoordinateListT> coordinate_list) const std::shared_ptr<CoordinateListT> coordinate_list)
@ -543,9 +321,6 @@ class StaticRTree
leaves_stream.open(leaf_file, std::ios::binary); leaves_stream.open(leaf_file, std::ios::binary);
leaves_stream.read((char *)&m_element_count, sizeof(uint64_t)); leaves_stream.read((char *)&m_element_count, sizeof(uint64_t));
// SimpleLogger().Write() << tree_size << " nodes in search tree";
// SimpleLogger().Write() << m_element_count << " elements in leafs";
} }
explicit StaticRTree(TreeNode *tree_node_ptr, explicit StaticRTree(TreeNode *tree_node_ptr,
@ -567,108 +342,47 @@ class StaticRTree
leaves_stream.open(leaf_file, std::ios::binary); leaves_stream.open(leaf_file, std::ios::binary);
leaves_stream.read((char *)&m_element_count, sizeof(uint64_t)); leaves_stream.read((char *)&m_element_count, sizeof(uint64_t));
// SimpleLogger().Write() << tree_size << " nodes in search tree";
// SimpleLogger().Write() << m_element_count << " elements in leafs";
}
// Read-only operation for queries
bool LocateClosestEndPointForCoordinate(const FixedPointCoordinate &input_coordinate,
FixedPointCoordinate &result_coordinate,
const unsigned zoom_level)
{
bool ignore_tiny_components = (zoom_level <= 14);
float min_dist = std::numeric_limits<float>::max();
float min_max_dist = std::numeric_limits<float>::max();
// initialize queue with root element
std::priority_queue<QueryCandidate> traversal_queue;
traversal_queue.emplace(0.f, 0);
while (!traversal_queue.empty())
{
const QueryCandidate current_query_node = traversal_queue.top();
traversal_queue.pop();
const bool prune_downward = (current_query_node.min_dist >= min_max_dist);
const bool prune_upward = (current_query_node.min_dist >= min_dist);
if (!prune_downward && !prune_upward)
{ // downward pruning
TreeNode &current_tree_node = m_search_tree[current_query_node.node_id];
if (current_tree_node.child_is_on_disk)
{
LeafNode current_leaf_node;
LoadLeafFromDisk(current_tree_node.children[0], current_leaf_node);
for (uint32_t i = 0; i < current_leaf_node.object_count; ++i)
{
EdgeDataT const &current_edge = current_leaf_node.objects[i];
if (ignore_tiny_components && current_edge.component_id != 0)
{
continue;
}
float current_minimum_distance = coordinate_calculation::euclidean_distance(
input_coordinate.lat, input_coordinate.lon,
m_coordinate_list->at(current_edge.u).lat,
m_coordinate_list->at(current_edge.u).lon);
if (current_minimum_distance < min_dist)
{
// found a new minimum
min_dist = current_minimum_distance;
result_coordinate = m_coordinate_list->at(current_edge.u);
}
current_minimum_distance = coordinate_calculation::euclidean_distance(
input_coordinate.lat, input_coordinate.lon,
m_coordinate_list->at(current_edge.v).lat,
m_coordinate_list->at(current_edge.v).lon);
if (current_minimum_distance < min_dist)
{
// found a new minimum
min_dist = current_minimum_distance;
result_coordinate = m_coordinate_list->at(current_edge.v);
}
}
}
else
{
min_max_dist = ExploreTreeNode(current_tree_node, input_coordinate, min_dist,
min_max_dist, traversal_queue);
}
}
}
return result_coordinate.is_valid();
} }
bool IncrementalFindPhantomNodeForCoordinate( // Override filter and terminator for the desired behaviour.
const FixedPointCoordinate &input_coordinate, std::vector<EdgeDataT> Nearest(const FixedPointCoordinate &input_coordinate,
std::vector<PhantomNode> &result_phantom_node_vector, const std::size_t max_results)
const unsigned max_number_of_phantom_nodes,
const float max_distance = 1100,
const unsigned max_checked_elements = 4 * LEAF_NODE_SIZE)
{ {
unsigned inspected_elements = 0; return Nearest(input_coordinate,
unsigned number_of_elements_from_big_cc = 0; [](const EdgeDataT &)
unsigned number_of_elements_from_tiny_cc = 0; {
return std::make_pair(true, true);
},
[max_results](const std::size_t num_results, const float)
{
return num_results >= max_results;
});
}
// Override filter and terminator for the desired behaviour.
template <typename FilterT, typename TerminationT>
std::vector<EdgeDataT> Nearest(const FixedPointCoordinate &input_coordinate,
const FilterT filter,
const TerminationT terminate)
{
std::vector<EdgeDataT> results;
std::pair<double, double> projected_coordinate = { std::pair<double, double> projected_coordinate = {
mercator::lat2y(input_coordinate.lat / COORDINATE_PRECISION), mercator::lat2y(input_coordinate.lat / COORDINATE_PRECISION),
input_coordinate.lon / COORDINATE_PRECISION}; input_coordinate.lon / COORDINATE_PRECISION};
// initialize queue with root element // initialize queue with root element
std::priority_queue<IncrementalQueryCandidate> traversal_queue; std::priority_queue<QueryCandidate> traversal_queue;
traversal_queue.emplace(0.f, m_search_tree[0]); traversal_queue.push(QueryCandidate {0.f, m_search_tree[0]});
while (!traversal_queue.empty()) while (!traversal_queue.empty())
{ {
const IncrementalQueryCandidate current_query_node = traversal_queue.top(); const QueryCandidate current_query_node = traversal_queue.top();
if (current_query_node.min_dist > max_distance && if (terminate(results.size(), current_query_node.min_dist))
inspected_elements > max_checked_elements)
{ {
traversal_queue = std::priority_queue<QueryCandidate>{};
break; break;
} }
traversal_queue.pop(); traversal_queue.pop();
if (current_query_node.node.template is<TreeNode>()) if (current_query_node.node.template is<TreeNode>())
@ -677,396 +391,81 @@ class StaticRTree
current_query_node.node.template get<TreeNode>(); current_query_node.node.template get<TreeNode>();
if (current_tree_node.child_is_on_disk) if (current_tree_node.child_is_on_disk)
{ {
LeafNode current_leaf_node; ExploreLeafNode(current_tree_node.children[0], input_coordinate,
LoadLeafFromDisk(current_tree_node.children[0], current_leaf_node); projected_coordinate, traversal_queue);
// current object represents a block on disk
for (const auto i : osrm::irange(0u, current_leaf_node.object_count))
{
const auto &current_edge = current_leaf_node.objects[i];
const float current_perpendicular_distance = coordinate_calculation::
perpendicular_distance_from_projected_coordinate(
m_coordinate_list->at(current_edge.u),
m_coordinate_list->at(current_edge.v), input_coordinate,
projected_coordinate);
// distance must be non-negative
BOOST_ASSERT(0.f <= current_perpendicular_distance);
traversal_queue.emplace(current_perpendicular_distance, current_edge);
}
} }
else else
{ {
// for each child mbr get a lower bound and enqueue it ExploreTreeNode(current_tree_node, input_coordinate, traversal_queue);
for (const auto i : osrm::irange(0u, current_tree_node.child_count))
{
const int32_t child_id = current_tree_node.children[i];
const TreeNode &child_tree_node = m_search_tree[child_id];
const RectangleT &child_rectangle =
child_tree_node.minimum_bounding_rectangle;
const float lower_bound_to_element =
child_rectangle.GetMinDist(input_coordinate);
BOOST_ASSERT(0.f <= lower_bound_to_element);
traversal_queue.emplace(lower_bound_to_element, child_tree_node);
}
} }
} }
else else
{ // current object is a leaf node {
++inspected_elements;
// inspecting an actual road segment // inspecting an actual road segment
const EdgeDataT &current_segment = const auto &current_segment = current_query_node.node.template get<EdgeDataT>();
current_query_node.node.template get<EdgeDataT>();
// continue searching for the first segment from a big component
if (number_of_elements_from_big_cc == 0 && auto use_segment = filter(current_segment);
number_of_elements_from_tiny_cc >= max_number_of_phantom_nodes && if (!use_segment.first && !use_segment.second)
current_segment.is_in_tiny_cc())
{ {
continue; continue;
} }
// check if it is smaller than what we had before
float current_ratio = 0.f;
FixedPointCoordinate foot_point_coordinate_on_segment;
// const float current_perpendicular_distance =
coordinate_calculation::perpendicular_distance_from_projected_coordinate(
m_coordinate_list->at(current_segment.u),
m_coordinate_list->at(current_segment.v), input_coordinate,
projected_coordinate, foot_point_coordinate_on_segment, current_ratio);
// store phantom node in result vector // store phantom node in result vector
result_phantom_node_vector.emplace_back(current_segment, results.push_back(std::move(current_segment));
foot_point_coordinate_on_segment);
// Hack to fix rounding errors and wandering via nodes. if (!use_segment.first)
FixUpRoundingIssue(input_coordinate, result_phantom_node_vector.back());
// set forward and reverse weights on the phantom node
SetForwardAndReverseWeightsOnPhantomNode(current_segment,
result_phantom_node_vector.back());
// update counts on what we found from which result class
if (current_segment.is_in_tiny_cc())
{ // found an element in tiny component
++number_of_elements_from_tiny_cc;
}
else
{ // found an element in a big component
++number_of_elements_from_big_cc;
}
}
// stop the search by flushing the queue
if (result_phantom_node_vector.size() >= max_number_of_phantom_nodes &&
number_of_elements_from_big_cc > 0)
{
traversal_queue = std::priority_queue<IncrementalQueryCandidate>{};
}
}
#ifdef NDEBUG
// SimpleLogger().Write() << "result_phantom_node_vector.size(): " <<
// result_phantom_node_vector.size();
// SimpleLogger().Write() << "max_number_of_phantom_nodes: " << max_number_of_phantom_nodes;
// SimpleLogger().Write() << "number_of_elements_from_big_cc: " <<
// number_of_elements_from_big_cc;
// SimpleLogger().Write() << "number_of_elements_from_tiny_cc: " <<
// number_of_elements_from_tiny_cc;
// SimpleLogger().Write() << "inspected_elements: " << inspected_elements;
// SimpleLogger().Write() << "max_checked_elements: " << max_checked_elements;
// SimpleLogger().Write() << "pruned_elements: " << pruned_elements;
#endif
return !result_phantom_node_vector.empty();
}
// Returns elements within max_distance.
// If the minium of elements could not be found in the search radius, widen
// it until the minimum can be satisfied.
bool IncrementalFindPhantomNodeForCoordinateWithDistance(
const FixedPointCoordinate &input_coordinate,
std::vector<std::pair<PhantomNode, double>> &result_phantom_node_vector,
const double max_distance,
const unsigned max_checked_elements = 4 * LEAF_NODE_SIZE)
{
unsigned inspected_elements = 0;
std::pair<double, double> projected_coordinate = {
mercator::lat2y(input_coordinate.lat / COORDINATE_PRECISION),
input_coordinate.lon / COORDINATE_PRECISION};
// initialize queue with root element
std::priority_queue<IncrementalQueryCandidate> traversal_queue;
traversal_queue.emplace(0.f, m_search_tree[0]);
while (!traversal_queue.empty())
{
const IncrementalQueryCandidate current_query_node = traversal_queue.top();
traversal_queue.pop();
if (current_query_node.min_dist > max_distance ||
inspected_elements >= max_checked_elements)
{
break;
}
if (current_query_node.node.template is<TreeNode>())
{ // current object is a tree node
const TreeNode &current_tree_node =
current_query_node.node.template get<TreeNode>();
if (current_tree_node.child_is_on_disk)
{ {
LeafNode current_leaf_node; results.back().forward_edge_based_node_id = SPECIAL_NODEID;
LoadLeafFromDisk(current_tree_node.children[0], current_leaf_node);
// current object represents a block on disk
for (const auto i : osrm::irange(0u, current_leaf_node.object_count))
{
const auto &current_edge = current_leaf_node.objects[i];
const float current_perpendicular_distance = coordinate_calculation::
perpendicular_distance_from_projected_coordinate(
m_coordinate_list->at(current_edge.u),
m_coordinate_list->at(current_edge.v), input_coordinate,
projected_coordinate);
// distance must be non-negative
BOOST_ASSERT(0.f <= current_perpendicular_distance);
if (current_perpendicular_distance <= max_distance)
{
traversal_queue.emplace(current_perpendicular_distance, current_edge);
}
}
} }
else else if (!use_segment.second)
{ {
// for each child mbr get a lower bound and enqueue it results.back().reverse_edge_based_node_id = SPECIAL_NODEID;
for (const auto i : osrm::irange(0u, current_tree_node.child_count))
{
const int32_t child_id = current_tree_node.children[i];
const TreeNode &child_tree_node = m_search_tree[child_id];
const RectangleT &child_rectangle =
child_tree_node.minimum_bounding_rectangle;
const float lower_bound_to_element =
child_rectangle.GetMinDist(input_coordinate);
BOOST_ASSERT(0.f <= lower_bound_to_element);
if (lower_bound_to_element <= max_distance)
{
traversal_queue.emplace(lower_bound_to_element, child_tree_node);
}
}
}
}
else
{ // current object is a leaf node
++inspected_elements;
// inspecting an actual road segment
const EdgeDataT &current_segment =
current_query_node.node.template get<EdgeDataT>();
// check if it is smaller than what we had before
float current_ratio = 0.f;
FixedPointCoordinate foot_point_coordinate_on_segment;
const float current_perpendicular_distance =
coordinate_calculation::perpendicular_distance_from_projected_coordinate(
m_coordinate_list->at(current_segment.u),
m_coordinate_list->at(current_segment.v), input_coordinate,
projected_coordinate, foot_point_coordinate_on_segment, current_ratio);
if (current_perpendicular_distance >= max_distance)
{
traversal_queue = std::priority_queue<IncrementalQueryCandidate>{};
continue;
}
// store phantom node in result vector
result_phantom_node_vector.emplace_back(
PhantomNode(
current_segment.forward_edge_based_node_id,
current_segment.reverse_edge_based_node_id, current_segment.name_id,
current_segment.forward_weight, current_segment.reverse_weight,
current_segment.forward_offset, current_segment.reverse_offset,
current_segment.packed_geometry_id, current_segment.component_id,
foot_point_coordinate_on_segment, current_segment.fwd_segment_position,
current_segment.forward_travel_mode, current_segment.backward_travel_mode),
current_perpendicular_distance);
// Hack to fix rounding errors and wandering via nodes.
FixUpRoundingIssue(input_coordinate, result_phantom_node_vector.back().first);
// set forward and reverse weights on the phantom node
SetForwardAndReverseWeightsOnPhantomNode(current_segment,
result_phantom_node_vector.back().first);
}
// stop the search by flushing the queue
if (inspected_elements >= max_checked_elements)
{
traversal_queue = std::priority_queue<IncrementalQueryCandidate>{};
}
}
return !result_phantom_node_vector.empty();
}
bool FindPhantomNodeForCoordinate(const FixedPointCoordinate &input_coordinate,
PhantomNode &result_phantom_node,
const unsigned zoom_level)
{
const bool ignore_tiny_components = (zoom_level <= 14);
EdgeDataT nearest_edge;
float min_dist = std::numeric_limits<float>::max();
float min_max_dist = std::numeric_limits<float>::max();
std::priority_queue<QueryCandidate> traversal_queue;
traversal_queue.emplace(0.f, 0);
while (!traversal_queue.empty())
{
const QueryCandidate current_query_node = traversal_queue.top();
traversal_queue.pop();
const bool prune_downward = (current_query_node.min_dist > min_max_dist);
const bool prune_upward = (current_query_node.min_dist > min_dist);
if (!prune_downward && !prune_upward)
{ // downward pruning
const TreeNode &current_tree_node = m_search_tree[current_query_node.node_id];
if (current_tree_node.child_is_on_disk)
{
LeafNode current_leaf_node;
LoadLeafFromDisk(current_tree_node.children[0], current_leaf_node);
for (uint32_t i = 0; i < current_leaf_node.object_count; ++i)
{
const EdgeDataT &current_edge = current_leaf_node.objects[i];
if (ignore_tiny_components && current_edge.component_id != 0)
{
continue;
}
float current_ratio = 0.;
FixedPointCoordinate nearest;
const float current_perpendicular_distance =
coordinate_calculation::perpendicular_distance(
m_coordinate_list->at(current_edge.u),
m_coordinate_list->at(current_edge.v), input_coordinate, nearest,
current_ratio);
BOOST_ASSERT(0. <= current_perpendicular_distance);
if ((current_perpendicular_distance < min_dist) &&
!osrm::epsilon_compare(current_perpendicular_distance, min_dist))
{ // found a new minimum
min_dist = current_perpendicular_distance;
result_phantom_node = {current_edge.forward_edge_based_node_id,
current_edge.reverse_edge_based_node_id,
current_edge.name_id,
current_edge.forward_weight,
current_edge.reverse_weight,
current_edge.forward_offset,
current_edge.reverse_offset,
current_edge.packed_geometry_id,
current_edge.component_id,
nearest,
current_edge.fwd_segment_position,
current_edge.forward_travel_mode,
current_edge.backward_travel_mode};
nearest_edge = current_edge;
}
}
}
else
{
min_max_dist = ExploreTreeNode(current_tree_node, input_coordinate, min_dist,
min_max_dist, traversal_queue);
} }
} }
} }
if (result_phantom_node.location.is_valid()) return results;
{
// Hack to fix rounding errors and wandering via nodes.
FixUpRoundingIssue(input_coordinate, result_phantom_node);
// set forward and reverse weights on the phantom node
SetForwardAndReverseWeightsOnPhantomNode(nearest_edge, result_phantom_node);
}
return result_phantom_node.location.is_valid();
} }
private: private:
inline void SetForwardAndReverseWeightsOnPhantomNode(const EdgeDataT &nearest_edge, template <typename QueueT>
PhantomNode &result_phantom_node) const void ExploreLeafNode(const std::uint32_t leaf_id,
const FixedPointCoordinate &input_coordinate,
const std::pair<double, double> &projected_coordinate,
QueueT &traversal_queue)
{ {
const float distance_1 = coordinate_calculation::euclidean_distance( LeafNode current_leaf_node;
m_coordinate_list->at(nearest_edge.u), result_phantom_node.location); LoadLeafFromDisk(leaf_id, current_leaf_node);
const float distance_2 = coordinate_calculation::euclidean_distance(
m_coordinate_list->at(nearest_edge.u), m_coordinate_list->at(nearest_edge.v));
const float ratio = std::min(1.f, distance_1 / distance_2);
using TreeWeightType = decltype(result_phantom_node.forward_weight); // current object represents a block on disk
static_assert(std::is_same<decltype(result_phantom_node.forward_weight), for (const auto i : osrm::irange(0u, current_leaf_node.object_count))
decltype(result_phantom_node.reverse_weight)>::value, {
"forward and reverse weight type in tree must be the same"); auto &current_edge = current_leaf_node.objects[i];
const float current_perpendicular_distance =
coordinate_calculation::perpendicular_distance_from_projected_coordinate(
m_coordinate_list->at(current_edge.u), m_coordinate_list->at(current_edge.v),
input_coordinate, projected_coordinate);
// distance must be non-negative
BOOST_ASSERT(0.f <= current_perpendicular_distance);
if (SPECIAL_NODEID != result_phantom_node.forward_node_id) traversal_queue.push(QueryCandidate {current_perpendicular_distance, std::move(current_edge)});
{
const auto new_weight =
static_cast<TreeWeightType>(result_phantom_node.forward_weight * ratio);
result_phantom_node.forward_weight = new_weight;
}
if (SPECIAL_NODEID != result_phantom_node.reverse_node_id)
{
const auto new_weight =
static_cast<TreeWeightType>(result_phantom_node.reverse_weight * (1.f - ratio));
result_phantom_node.reverse_weight = new_weight;
}
}
// fixup locations if too close to inputs
inline void FixUpRoundingIssue(const FixedPointCoordinate &input_coordinate,
PhantomNode &result_phantom_node) const
{
if (1 == std::abs(input_coordinate.lon - result_phantom_node.location.lon))
{
result_phantom_node.location.lon = input_coordinate.lon;
}
if (1 == std::abs(input_coordinate.lat - result_phantom_node.location.lat))
{
result_phantom_node.location.lat = input_coordinate.lat;
} }
} }
template <class QueueT> template <class QueueT>
inline float ExploreTreeNode(const TreeNode &parent, void ExploreTreeNode(const TreeNode &parent,
const FixedPointCoordinate &input_coordinate, const FixedPointCoordinate &input_coordinate,
const float min_dist, QueueT &traversal_queue)
const float min_max_dist,
QueueT &traversal_queue)
{ {
float new_min_max_dist = min_max_dist;
// traverse children, prune if global mindist is smaller than local one
for (uint32_t i = 0; i < parent.child_count; ++i) for (uint32_t i = 0; i < parent.child_count; ++i)
{ {
const int32_t child_id = parent.children[i]; const int32_t child_id = parent.children[i];
const TreeNode &child_tree_node = m_search_tree[child_id]; const auto &child_tree_node = m_search_tree[child_id];
const RectangleT &child_rectangle = child_tree_node.minimum_bounding_rectangle; const auto &child_rectangle = child_tree_node.minimum_bounding_rectangle;
const float lower_bound_to_element = child_rectangle.GetMinDist(input_coordinate); const float lower_bound_to_element = child_rectangle.GetMinDist(input_coordinate);
const float upper_bound_to_element = child_rectangle.GetMinMaxDist(input_coordinate); traversal_queue.push(QueryCandidate {lower_bound_to_element, m_search_tree[child_id]});
new_min_max_dist = std::min(new_min_max_dist, upper_bound_to_element);
if (lower_bound_to_element > new_min_max_dist)
{
continue;
}
if (lower_bound_to_element > min_dist)
{
continue;
}
traversal_queue.emplace(lower_bound_to_element, child_id);
} }
return new_min_max_dist;
} }
inline void LoadLeafFromDisk(const uint32_t leaf_id, LeafNode &result_node) inline void LoadLeafFromDisk(const uint32_t leaf_id, LeafNode &result_node)
@ -1077,8 +476,7 @@ class StaticRTree
} }
if (!leaves_stream.good()) if (!leaves_stream.good())
{ {
leaves_stream.clear(std::ios::goodbit); throw osrm::exception("Could not read from leaf file.");
SimpleLogger().Write(logDEBUG) << "Resetting stale filestream";
} }
const uint64_t seek_pos = sizeof(uint64_t) + leaf_id * sizeof(LeafNode); const uint64_t seek_pos = sizeof(uint64_t) + leaf_id * sizeof(LeafNode);
leaves_stream.seekg(seek_pos); leaves_stream.seekg(seek_pos);
@ -1087,18 +485,11 @@ class StaticRTree
BOOST_ASSERT_MSG(leaves_stream.good(), "Reading from leaf file failed."); BOOST_ASSERT_MSG(leaves_stream.good(), "Reading from leaf file failed.");
} }
inline bool EdgesAreEquivalent(const FixedPointCoordinate &a, template <typename CoordinateT>
const FixedPointCoordinate &b, void InitializeMBRectangle(Rectangle &rectangle,
const FixedPointCoordinate &c, const std::array<EdgeDataT, LEAF_NODE_SIZE> &objects,
const FixedPointCoordinate &d) const const uint32_t element_count,
{ const std::vector<CoordinateT> &coordinate_list)
return (a == b && c == d) || (a == c && b == d) || (a == d && b == c);
}
inline void InitializeMBRectangle(RectangleT &rectangle,
const std::array<EdgeDataT, LEAF_NODE_SIZE> &objects,
const uint32_t element_count,
const std::vector<QueryNode> &coordinate_list)
{ {
for (uint32_t i = 0; i < element_count; ++i) for (uint32_t i = 0; i < element_count; ++i)
{ {

View File

@ -38,7 +38,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "server/data_structures/datafacade_base.hpp" #include "server/data_structures/datafacade_base.hpp"
#include "server/data_structures/shared_datatype.hpp" #include "server/data_structures/shared_datatype.hpp"
#include "server/data_structures/shared_barriers.hpp" #include "server/data_structures/shared_barriers.hpp"
#include "util/boost_filesystem_2_fix.hpp"
#include "util/datastore_options.hpp" #include "util/datastore_options.hpp"
#include "util/simple_logger.hpp" #include "util/simple_logger.hpp"
#include "util/osrm_exception.hpp" #include "util/osrm_exception.hpp"
@ -46,7 +45,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "typedefs.h" #include "typedefs.h"
#include <osrm/coordinate.hpp> #include <osrm/coordinate.hpp>
#include <osrm/server_paths.hpp>
using RTreeLeaf = BaseDataFacade<QueryEdge::EdgeData>::RTreeLeaf; using RTreeLeaf = BaseDataFacade<QueryEdge::EdgeData>::RTreeLeaf;
using RTreeNode = StaticRTree<RTreeLeaf, ShM<FixedPointCoordinate, true>::vector, true>::TreeNode; using RTreeNode = StaticRTree<RTreeLeaf, ShM<FixedPointCoordinate, true>::vector, true>::TreeNode;
@ -63,6 +61,7 @@ using QueryGraph = StaticGraph<QueryEdge::EdgeData>;
#include <fstream> #include <fstream>
#include <string> #include <string>
#include <new>
// delete a shared memory region. report warning if it could not be deleted // delete a shared memory region. report warning if it could not be deleted
void delete_region(const SharedDataType region) void delete_region(const SharedDataType region)
@ -94,360 +93,437 @@ void delete_region(const SharedDataType region)
} }
} }
int main(const int argc, const char *argv[]) int main(const int argc, const char *argv[]) try
{ {
LogPolicy::GetInstance().Unmute(); LogPolicy::GetInstance().Unmute();
SharedBarriers barrier; SharedBarriers barrier;
try
{
#ifdef __linux__ #ifdef __linux__
// try to disable swapping on Linux // try to disable swapping on Linux
const bool lock_flags = MCL_CURRENT | MCL_FUTURE; const bool lock_flags = MCL_CURRENT | MCL_FUTURE;
if (-1 == mlockall(lock_flags)) if (-1 == mlockall(lock_flags))
{
SimpleLogger().Write(logWARNING) << "Process " << argv[0]
<< " could not request RAM lock";
}
#endif
try
{
boost::interprocess::scoped_lock<boost::interprocess::named_mutex> pending_lock(
barrier.pending_update_mutex);
}
catch (...)
{
// hard unlock in case of any exception.
barrier.pending_update_mutex.unlock();
}
}
catch (const std::exception &e)
{ {
SimpleLogger().Write(logWARNING) << "[exception] " << e.what(); SimpleLogger().Write(logWARNING) << "Process " << argv[0] << " could not request RAM lock";
} }
#endif
try try
{ {
SimpleLogger().Write(logDEBUG) << "Checking input parameters"; boost::interprocess::scoped_lock<boost::interprocess::named_mutex> pending_lock(
barrier.pending_update_mutex);
}
catch (...)
{
// hard unlock in case of any exception.
barrier.pending_update_mutex.unlock();
}
ServerPaths server_paths; SimpleLogger().Write(logDEBUG) << "Checking input parameters";
if (!GenerateDataStoreOptions(argc, argv, server_paths))
{
return 0;
}
if (server_paths.find("hsgrdata") == server_paths.end()) std::unordered_map<std::string, boost::filesystem::path> server_paths;
{ if (!GenerateDataStoreOptions(argc, argv, server_paths))
throw osrm::exception("no hsgr file found"); {
} return EXIT_SUCCESS;
if (server_paths.find("ramindex") == server_paths.end()) }
{
throw osrm::exception("no ram index file found");
}
if (server_paths.find("fileindex") == server_paths.end())
{
throw osrm::exception("no leaf index file found");
}
if (server_paths.find("nodesdata") == server_paths.end())
{
throw osrm::exception("no nodes file found");
}
if (server_paths.find("edgesdata") == server_paths.end())
{
throw osrm::exception("no edges file found");
}
if (server_paths.find("namesdata") == server_paths.end())
{
throw osrm::exception("no names file found");
}
if (server_paths.find("geometry") == server_paths.end())
{
throw osrm::exception("no geometry file found");
}
if (server_paths.find("core") == server_paths.end())
{
throw osrm::exception("no core file found");
}
ServerPaths::const_iterator paths_iterator = server_paths.find("hsgrdata"); if (server_paths.find("hsgrdata") == server_paths.end())
BOOST_ASSERT(server_paths.end() != paths_iterator); {
BOOST_ASSERT(!paths_iterator->second.empty()); throw osrm::exception("no hsgr file found");
const boost::filesystem::path &hsgr_path = paths_iterator->second; }
paths_iterator = server_paths.find("timestamp"); if (server_paths.find("ramindex") == server_paths.end())
BOOST_ASSERT(server_paths.end() != paths_iterator); {
BOOST_ASSERT(!paths_iterator->second.empty()); throw osrm::exception("no ram index file found");
const boost::filesystem::path &timestamp_path = paths_iterator->second; }
paths_iterator = server_paths.find("ramindex"); if (server_paths.find("fileindex") == server_paths.end())
BOOST_ASSERT(server_paths.end() != paths_iterator); {
BOOST_ASSERT(!paths_iterator->second.empty()); throw osrm::exception("no leaf index file found");
const boost::filesystem::path &ram_index_path = paths_iterator->second; }
paths_iterator = server_paths.find("fileindex"); if (server_paths.find("nodesdata") == server_paths.end())
BOOST_ASSERT(server_paths.end() != paths_iterator); {
BOOST_ASSERT(!paths_iterator->second.empty()); throw osrm::exception("no nodes file found");
const boost::filesystem::path index_file_path_absolute = }
boost::filesystem::portable_canonical(paths_iterator->second); if (server_paths.find("edgesdata") == server_paths.end())
const std::string &file_index_path = index_file_path_absolute.string(); {
paths_iterator = server_paths.find("nodesdata"); throw osrm::exception("no edges file found");
BOOST_ASSERT(server_paths.end() != paths_iterator); }
BOOST_ASSERT(!paths_iterator->second.empty()); if (server_paths.find("namesdata") == server_paths.end())
const boost::filesystem::path &nodes_data_path = paths_iterator->second; {
paths_iterator = server_paths.find("edgesdata"); throw osrm::exception("no names file found");
BOOST_ASSERT(server_paths.end() != paths_iterator); }
BOOST_ASSERT(!paths_iterator->second.empty()); if (server_paths.find("geometry") == server_paths.end())
const boost::filesystem::path &edges_data_path = paths_iterator->second; {
paths_iterator = server_paths.find("namesdata"); throw osrm::exception("no geometry file found");
BOOST_ASSERT(server_paths.end() != paths_iterator); }
BOOST_ASSERT(!paths_iterator->second.empty()); if (server_paths.find("core") == server_paths.end())
const boost::filesystem::path &names_data_path = paths_iterator->second; {
paths_iterator = server_paths.find("geometry"); throw osrm::exception("no core file found");
BOOST_ASSERT(server_paths.end() != paths_iterator); }
BOOST_ASSERT(!paths_iterator->second.empty());
const boost::filesystem::path &geometries_data_path = paths_iterator->second;
paths_iterator = server_paths.find("core");
BOOST_ASSERT(server_paths.end() != paths_iterator);
BOOST_ASSERT(!paths_iterator->second.empty());
const boost::filesystem::path &core_marker_path = paths_iterator->second;
// determine segment to use auto paths_iterator = server_paths.find("hsgrdata");
bool segment2_in_use = SharedMemory::RegionExists(LAYOUT_2); BOOST_ASSERT(server_paths.end() != paths_iterator);
const SharedDataType layout_region = [&] BOOST_ASSERT(!paths_iterator->second.empty());
{ const boost::filesystem::path &hsgr_path = paths_iterator->second;
return segment2_in_use ? LAYOUT_1 : LAYOUT_2; paths_iterator = server_paths.find("timestamp");
}(); BOOST_ASSERT(server_paths.end() != paths_iterator);
const SharedDataType data_region = [&] BOOST_ASSERT(!paths_iterator->second.empty());
{ const boost::filesystem::path &timestamp_path = paths_iterator->second;
return segment2_in_use ? DATA_1 : DATA_2; paths_iterator = server_paths.find("ramindex");
}(); BOOST_ASSERT(server_paths.end() != paths_iterator);
const SharedDataType previous_layout_region = [&] BOOST_ASSERT(!paths_iterator->second.empty());
{ const boost::filesystem::path &ram_index_path = paths_iterator->second;
return segment2_in_use ? LAYOUT_2 : LAYOUT_1; paths_iterator = server_paths.find("fileindex");
}(); BOOST_ASSERT(server_paths.end() != paths_iterator);
const SharedDataType previous_data_region = [&] BOOST_ASSERT(!paths_iterator->second.empty());
{ const boost::filesystem::path index_file_path_absolute =
return segment2_in_use ? DATA_2 : DATA_1; boost::filesystem::canonical(paths_iterator->second);
}(); const std::string &file_index_path = index_file_path_absolute.string();
paths_iterator = server_paths.find("nodesdata");
BOOST_ASSERT(server_paths.end() != paths_iterator);
BOOST_ASSERT(!paths_iterator->second.empty());
const boost::filesystem::path &nodes_data_path = paths_iterator->second;
paths_iterator = server_paths.find("edgesdata");
BOOST_ASSERT(server_paths.end() != paths_iterator);
BOOST_ASSERT(!paths_iterator->second.empty());
const boost::filesystem::path &edges_data_path = paths_iterator->second;
paths_iterator = server_paths.find("namesdata");
BOOST_ASSERT(server_paths.end() != paths_iterator);
BOOST_ASSERT(!paths_iterator->second.empty());
const boost::filesystem::path &names_data_path = paths_iterator->second;
paths_iterator = server_paths.find("geometry");
BOOST_ASSERT(server_paths.end() != paths_iterator);
BOOST_ASSERT(!paths_iterator->second.empty());
const boost::filesystem::path &geometries_data_path = paths_iterator->second;
paths_iterator = server_paths.find("core");
BOOST_ASSERT(server_paths.end() != paths_iterator);
BOOST_ASSERT(!paths_iterator->second.empty());
const boost::filesystem::path &core_marker_path = paths_iterator->second;
// Allocate a memory layout in shared memory, deallocate previous // determine segment to use
SharedMemory *layout_memory = bool segment2_in_use = SharedMemory::RegionExists(LAYOUT_2);
SharedMemoryFactory::Get(layout_region, sizeof(SharedDataLayout)); const SharedDataType layout_region = [&]
SharedDataLayout *shared_layout_ptr = new (layout_memory->Ptr()) SharedDataLayout(); {
return segment2_in_use ? LAYOUT_1 : LAYOUT_2;
}();
const SharedDataType data_region = [&]
{
return segment2_in_use ? DATA_1 : DATA_2;
}();
const SharedDataType previous_layout_region = [&]
{
return segment2_in_use ? LAYOUT_2 : LAYOUT_1;
}();
const SharedDataType previous_data_region = [&]
{
return segment2_in_use ? DATA_2 : DATA_1;
}();
shared_layout_ptr->SetBlockSize<char>(SharedDataLayout::FILE_INDEX_PATH, // Allocate a memory layout in shared memory, deallocate previous
file_index_path.length() + 1); auto *layout_memory = SharedMemoryFactory::Get(layout_region, sizeof(SharedDataLayout));
auto *shared_layout_ptr = new (layout_memory->Ptr()) SharedDataLayout();
// collect number of elements to store in shared memory object shared_layout_ptr->SetBlockSize<char>(SharedDataLayout::FILE_INDEX_PATH,
SimpleLogger().Write() << "load names from: " << names_data_path; file_index_path.length() + 1);
// number of entries in name index
boost::filesystem::ifstream name_stream(names_data_path, std::ios::binary);
unsigned name_blocks = 0;
name_stream.read((char *)&name_blocks, sizeof(unsigned));
shared_layout_ptr->SetBlockSize<unsigned>(SharedDataLayout::NAME_OFFSETS, name_blocks);
shared_layout_ptr->SetBlockSize<typename RangeTable<16, true>::BlockT>(
SharedDataLayout::NAME_BLOCKS, name_blocks);
SimpleLogger().Write() << "name offsets size: " << name_blocks;
BOOST_ASSERT_MSG(0 != name_blocks, "name file broken");
unsigned number_of_chars = 0; // collect number of elements to store in shared memory object
name_stream.read((char *)&number_of_chars, sizeof(unsigned)); SimpleLogger().Write() << "load names from: " << names_data_path;
shared_layout_ptr->SetBlockSize<char>(SharedDataLayout::NAME_CHAR_LIST, number_of_chars); // number of entries in name index
boost::filesystem::ifstream name_stream(names_data_path, std::ios::binary);
unsigned name_blocks = 0;
name_stream.read((char *)&name_blocks, sizeof(unsigned));
shared_layout_ptr->SetBlockSize<unsigned>(SharedDataLayout::NAME_OFFSETS, name_blocks);
shared_layout_ptr->SetBlockSize<typename RangeTable<16, true>::BlockT>(
SharedDataLayout::NAME_BLOCKS, name_blocks);
SimpleLogger().Write() << "name offsets size: " << name_blocks;
BOOST_ASSERT_MSG(0 != name_blocks, "name file broken");
// Loading information for original edges unsigned number_of_chars = 0;
boost::filesystem::ifstream edges_input_stream(edges_data_path, std::ios::binary); name_stream.read((char *)&number_of_chars, sizeof(unsigned));
unsigned number_of_original_edges = 0; shared_layout_ptr->SetBlockSize<char>(SharedDataLayout::NAME_CHAR_LIST, number_of_chars);
edges_input_stream.read((char *)&number_of_original_edges, sizeof(unsigned));
// note: settings this all to the same size is correct, we extract them from the same struct // Loading information for original edges
shared_layout_ptr->SetBlockSize<NodeID>(SharedDataLayout::VIA_NODE_LIST, boost::filesystem::ifstream edges_input_stream(edges_data_path, std::ios::binary);
unsigned number_of_original_edges = 0;
edges_input_stream.read((char *)&number_of_original_edges, sizeof(unsigned));
// note: settings this all to the same size is correct, we extract them from the same struct
shared_layout_ptr->SetBlockSize<NodeID>(SharedDataLayout::VIA_NODE_LIST,
number_of_original_edges);
shared_layout_ptr->SetBlockSize<unsigned>(SharedDataLayout::NAME_ID_LIST,
number_of_original_edges);
shared_layout_ptr->SetBlockSize<TravelMode>(SharedDataLayout::TRAVEL_MODE,
number_of_original_edges); number_of_original_edges);
shared_layout_ptr->SetBlockSize<unsigned>(SharedDataLayout::NAME_ID_LIST, shared_layout_ptr->SetBlockSize<TurnInstruction>(SharedDataLayout::TURN_INSTRUCTION,
number_of_original_edges); number_of_original_edges);
shared_layout_ptr->SetBlockSize<TravelMode>(SharedDataLayout::TRAVEL_MODE, // note: there are 32 geometry indicators in one unsigned block
number_of_original_edges); shared_layout_ptr->SetBlockSize<unsigned>(SharedDataLayout::GEOMETRIES_INDICATORS,
shared_layout_ptr->SetBlockSize<TurnInstruction>(SharedDataLayout::TURN_INSTRUCTION, number_of_original_edges);
number_of_original_edges);
// note: there are 32 geometry indicators in one unsigned block
shared_layout_ptr->SetBlockSize<unsigned>(SharedDataLayout::GEOMETRIES_INDICATORS,
number_of_original_edges);
boost::filesystem::ifstream hsgr_input_stream(hsgr_path, std::ios::binary); boost::filesystem::ifstream hsgr_input_stream(hsgr_path, std::ios::binary);
FingerPrint fingerprint_valid = FingerPrint::GetValid(); FingerPrint fingerprint_valid = FingerPrint::GetValid();
FingerPrint fingerprint_loaded; FingerPrint fingerprint_loaded;
hsgr_input_stream.read((char *)&fingerprint_loaded, sizeof(FingerPrint)); hsgr_input_stream.read((char *)&fingerprint_loaded, sizeof(FingerPrint));
if (fingerprint_loaded.TestGraphUtil(fingerprint_valid)) if (fingerprint_loaded.TestGraphUtil(fingerprint_valid))
{
SimpleLogger().Write(logDEBUG) << "Fingerprint checked out ok";
}
else
{
SimpleLogger().Write(logWARNING) << ".hsgr was prepared with different build. "
"Reprocess to get rid of this warning.";
}
// load checksum
unsigned checksum = 0;
hsgr_input_stream.read((char *)&checksum, sizeof(unsigned));
shared_layout_ptr->SetBlockSize<unsigned>(SharedDataLayout::HSGR_CHECKSUM, 1);
// load graph node size
unsigned number_of_graph_nodes = 0;
hsgr_input_stream.read((char *)&number_of_graph_nodes, sizeof(unsigned));
BOOST_ASSERT_MSG((0 != number_of_graph_nodes), "number of nodes is zero");
shared_layout_ptr->SetBlockSize<QueryGraph::NodeArrayEntry>(SharedDataLayout::GRAPH_NODE_LIST,
number_of_graph_nodes);
// load graph edge size
unsigned number_of_graph_edges = 0;
hsgr_input_stream.read((char *)&number_of_graph_edges, sizeof(unsigned));
// BOOST_ASSERT_MSG(0 != number_of_graph_edges, "number of graph edges is zero");
shared_layout_ptr->SetBlockSize<QueryGraph::EdgeArrayEntry>(SharedDataLayout::GRAPH_EDGE_LIST,
number_of_graph_edges);
// load rsearch tree size
boost::filesystem::ifstream tree_node_file(ram_index_path, std::ios::binary);
uint32_t tree_size = 0;
tree_node_file.read((char *)&tree_size, sizeof(uint32_t));
shared_layout_ptr->SetBlockSize<RTreeNode>(SharedDataLayout::R_SEARCH_TREE, tree_size);
// load timestamp size
std::string m_timestamp;
if (boost::filesystem::exists(timestamp_path))
{
boost::filesystem::ifstream timestamp_stream(timestamp_path);
if (!timestamp_stream)
{ {
SimpleLogger().Write(logDEBUG) << "Fingerprint checked out ok"; SimpleLogger().Write(logWARNING) << timestamp_path << " not found. setting to default";
} }
else else
{ {
SimpleLogger().Write(logWARNING) << ".hsgr was prepared with different build. " getline(timestamp_stream, m_timestamp);
"Reprocess to get rid of this warning."; timestamp_stream.close();
} }
}
if (m_timestamp.empty())
{
m_timestamp = "n/a";
}
if (25 < m_timestamp.length())
{
m_timestamp.resize(25);
}
shared_layout_ptr->SetBlockSize<char>(SharedDataLayout::TIMESTAMP, m_timestamp.length());
// load checksum // load core marker size
unsigned checksum = 0; boost::filesystem::ifstream core_marker_file(core_marker_path, std::ios::binary);
hsgr_input_stream.read((char *)&checksum, sizeof(unsigned));
shared_layout_ptr->SetBlockSize<unsigned>(SharedDataLayout::HSGR_CHECKSUM, 1);
// load graph node size
unsigned number_of_graph_nodes = 0;
hsgr_input_stream.read((char *)&number_of_graph_nodes, sizeof(unsigned));
BOOST_ASSERT_MSG((0 != number_of_graph_nodes), "number of nodes is zero"); uint32_t number_of_core_markers = 0;
shared_layout_ptr->SetBlockSize<QueryGraph::NodeArrayEntry>( core_marker_file.read((char *)&number_of_core_markers, sizeof(uint32_t));
SharedDataLayout::GRAPH_NODE_LIST, number_of_graph_nodes); shared_layout_ptr->SetBlockSize<unsigned>(SharedDataLayout::CORE_MARKER,
number_of_core_markers);
// load graph edge size // load coordinate size
unsigned number_of_graph_edges = 0; boost::filesystem::ifstream nodes_input_stream(nodes_data_path, std::ios::binary);
hsgr_input_stream.read((char *)&number_of_graph_edges, sizeof(unsigned)); unsigned coordinate_list_size = 0;
// BOOST_ASSERT_MSG(0 != number_of_graph_edges, "number of graph edges is zero"); nodes_input_stream.read((char *)&coordinate_list_size, sizeof(unsigned));
shared_layout_ptr->SetBlockSize<QueryGraph::EdgeArrayEntry>( shared_layout_ptr->SetBlockSize<FixedPointCoordinate>(SharedDataLayout::COORDINATE_LIST,
SharedDataLayout::GRAPH_EDGE_LIST, number_of_graph_edges); coordinate_list_size);
// load rsearch tree size // load geometries sizes
boost::filesystem::ifstream tree_node_file(ram_index_path, std::ios::binary); std::ifstream geometry_input_stream(geometries_data_path.string().c_str(), std::ios::binary);
unsigned number_of_geometries_indices = 0;
unsigned number_of_compressed_geometries = 0;
uint32_t tree_size = 0; geometry_input_stream.read((char *)&number_of_geometries_indices, sizeof(unsigned));
tree_node_file.read((char *)&tree_size, sizeof(uint32_t)); shared_layout_ptr->SetBlockSize<unsigned>(SharedDataLayout::GEOMETRIES_INDEX,
shared_layout_ptr->SetBlockSize<RTreeNode>(SharedDataLayout::R_SEARCH_TREE, tree_size); number_of_geometries_indices);
boost::iostreams::seek(geometry_input_stream, number_of_geometries_indices * sizeof(unsigned),
BOOST_IOS::cur);
geometry_input_stream.read((char *)&number_of_compressed_geometries, sizeof(unsigned));
shared_layout_ptr->SetBlockSize<unsigned>(SharedDataLayout::GEOMETRIES_LIST,
number_of_compressed_geometries);
// allocate shared memory block
SimpleLogger().Write() << "allocating shared memory of " << shared_layout_ptr->GetSizeOfLayout()
<< " bytes";
SharedMemory *shared_memory =
SharedMemoryFactory::Get(data_region, shared_layout_ptr->GetSizeOfLayout());
char *shared_memory_ptr = static_cast<char *>(shared_memory->Ptr());
// load timestamp size // read actual data into shared memory object //
std::string m_timestamp;
if (boost::filesystem::exists(timestamp_path)) // hsgr checksum
unsigned *checksum_ptr = shared_layout_ptr->GetBlockPtr<unsigned, true>(
shared_memory_ptr, SharedDataLayout::HSGR_CHECKSUM);
*checksum_ptr = checksum;
// ram index file name
char *file_index_path_ptr = shared_layout_ptr->GetBlockPtr<char, true>(
shared_memory_ptr, SharedDataLayout::FILE_INDEX_PATH);
// make sure we have 0 ending
std::fill(file_index_path_ptr,
file_index_path_ptr +
shared_layout_ptr->GetBlockSize(SharedDataLayout::FILE_INDEX_PATH),
0);
std::copy(file_index_path.begin(), file_index_path.end(), file_index_path_ptr);
// Loading street names
unsigned *name_offsets_ptr = shared_layout_ptr->GetBlockPtr<unsigned, true>(
shared_memory_ptr, SharedDataLayout::NAME_OFFSETS);
if (shared_layout_ptr->GetBlockSize(SharedDataLayout::NAME_OFFSETS) > 0)
{
name_stream.read((char *)name_offsets_ptr,
shared_layout_ptr->GetBlockSize(SharedDataLayout::NAME_OFFSETS));
}
unsigned *name_blocks_ptr = shared_layout_ptr->GetBlockPtr<unsigned, true>(
shared_memory_ptr, SharedDataLayout::NAME_BLOCKS);
if (shared_layout_ptr->GetBlockSize(SharedDataLayout::NAME_BLOCKS) > 0)
{
name_stream.read((char *)name_blocks_ptr,
shared_layout_ptr->GetBlockSize(SharedDataLayout::NAME_BLOCKS));
}
char *name_char_ptr = shared_layout_ptr->GetBlockPtr<char, true>(
shared_memory_ptr, SharedDataLayout::NAME_CHAR_LIST);
unsigned temp_length;
name_stream.read((char *)&temp_length, sizeof(unsigned));
BOOST_ASSERT_MSG(temp_length ==
shared_layout_ptr->GetBlockSize(SharedDataLayout::NAME_CHAR_LIST),
"Name file corrupted!");
if (shared_layout_ptr->GetBlockSize(SharedDataLayout::NAME_CHAR_LIST) > 0)
{
name_stream.read(name_char_ptr,
shared_layout_ptr->GetBlockSize(SharedDataLayout::NAME_CHAR_LIST));
}
name_stream.close();
// load original edge information
NodeID *via_node_ptr = shared_layout_ptr->GetBlockPtr<NodeID, true>(
shared_memory_ptr, SharedDataLayout::VIA_NODE_LIST);
unsigned *name_id_ptr = shared_layout_ptr->GetBlockPtr<unsigned, true>(
shared_memory_ptr, SharedDataLayout::NAME_ID_LIST);
TravelMode *travel_mode_ptr = shared_layout_ptr->GetBlockPtr<TravelMode, true>(
shared_memory_ptr, SharedDataLayout::TRAVEL_MODE);
TurnInstruction *turn_instructions_ptr = shared_layout_ptr->GetBlockPtr<TurnInstruction, true>(
shared_memory_ptr, SharedDataLayout::TURN_INSTRUCTION);
unsigned *geometries_indicator_ptr = shared_layout_ptr->GetBlockPtr<unsigned, true>(
shared_memory_ptr, SharedDataLayout::GEOMETRIES_INDICATORS);
OriginalEdgeData current_edge_data;
for (unsigned i = 0; i < number_of_original_edges; ++i)
{
edges_input_stream.read((char *)&(current_edge_data), sizeof(OriginalEdgeData));
via_node_ptr[i] = current_edge_data.via_node;
name_id_ptr[i] = current_edge_data.name_id;
travel_mode_ptr[i] = current_edge_data.travel_mode;
turn_instructions_ptr[i] = current_edge_data.turn_instruction;
const unsigned bucket = i / 32;
const unsigned offset = i % 32;
const unsigned value = [&]
{ {
boost::filesystem::ifstream timestamp_stream(timestamp_path); unsigned return_value = 0;
if (!timestamp_stream) if (0 != offset)
{ {
SimpleLogger().Write(logWARNING) << timestamp_path return_value = geometries_indicator_ptr[bucket];
<< " not found. setting to default";
} }
else return return_value;
{ }();
getline(timestamp_stream, m_timestamp); if (current_edge_data.compressed_geometry)
timestamp_stream.close();
}
}
if (m_timestamp.empty())
{ {
m_timestamp = "n/a"; geometries_indicator_ptr[bucket] = (value | (1 << offset));
} }
if (25 < m_timestamp.length()) }
edges_input_stream.close();
// load compressed geometry
unsigned temporary_value;
unsigned *geometries_index_ptr = shared_layout_ptr->GetBlockPtr<unsigned, true>(
shared_memory_ptr, SharedDataLayout::GEOMETRIES_INDEX);
geometry_input_stream.seekg(0, geometry_input_stream.beg);
geometry_input_stream.read((char *)&temporary_value, sizeof(unsigned));
BOOST_ASSERT(temporary_value ==
shared_layout_ptr->num_entries[SharedDataLayout::GEOMETRIES_INDEX]);
if (shared_layout_ptr->GetBlockSize(SharedDataLayout::GEOMETRIES_INDEX) > 0)
{
geometry_input_stream.read(
(char *)geometries_index_ptr,
shared_layout_ptr->GetBlockSize(SharedDataLayout::GEOMETRIES_INDEX));
}
unsigned *geometries_list_ptr = shared_layout_ptr->GetBlockPtr<unsigned, true>(
shared_memory_ptr, SharedDataLayout::GEOMETRIES_LIST);
geometry_input_stream.read((char *)&temporary_value, sizeof(unsigned));
BOOST_ASSERT(temporary_value ==
shared_layout_ptr->num_entries[SharedDataLayout::GEOMETRIES_LIST]);
if (shared_layout_ptr->GetBlockSize(SharedDataLayout::GEOMETRIES_LIST) > 0)
{
geometry_input_stream.read(
(char *)geometries_list_ptr,
shared_layout_ptr->GetBlockSize(SharedDataLayout::GEOMETRIES_LIST));
}
// Loading list of coordinates
FixedPointCoordinate *coordinates_ptr =
shared_layout_ptr->GetBlockPtr<FixedPointCoordinate, true>(
shared_memory_ptr, SharedDataLayout::COORDINATE_LIST);
QueryNode current_node;
for (unsigned i = 0; i < coordinate_list_size; ++i)
{
nodes_input_stream.read((char *)&current_node, sizeof(QueryNode));
coordinates_ptr[i] = FixedPointCoordinate(current_node.lat, current_node.lon);
}
nodes_input_stream.close();
// store timestamp
char *timestamp_ptr =
shared_layout_ptr->GetBlockPtr<char, true>(shared_memory_ptr, SharedDataLayout::TIMESTAMP);
std::copy(m_timestamp.c_str(), m_timestamp.c_str() + m_timestamp.length(), timestamp_ptr);
// store search tree portion of rtree
char *rtree_ptr = shared_layout_ptr->GetBlockPtr<char, true>(shared_memory_ptr,
SharedDataLayout::R_SEARCH_TREE);
if (tree_size > 0)
{
tree_node_file.read(rtree_ptr, sizeof(RTreeNode) * tree_size);
}
tree_node_file.close();
// load core markers
std::vector<char> unpacked_core_markers(number_of_core_markers);
core_marker_file.read((char *)unpacked_core_markers.data(),
sizeof(char) * number_of_core_markers);
unsigned *core_marker_ptr = shared_layout_ptr->GetBlockPtr<unsigned, true>(
shared_memory_ptr, SharedDataLayout::CORE_MARKER);
for (auto i = 0u; i < number_of_core_markers; ++i)
{
BOOST_ASSERT(unpacked_core_markers[i] == 0 || unpacked_core_markers[i] == 1);
if (unpacked_core_markers[i] == 1)
{ {
m_timestamp.resize(25);
}
shared_layout_ptr->SetBlockSize<char>(SharedDataLayout::TIMESTAMP, m_timestamp.length());
// load core marker size
boost::filesystem::ifstream core_marker_file(core_marker_path, std::ios::binary);
uint32_t number_of_core_markers = 0;
core_marker_file.read((char *)&number_of_core_markers, sizeof(uint32_t));
shared_layout_ptr->SetBlockSize<unsigned>(SharedDataLayout::CORE_MARKER, number_of_core_markers);
// load coordinate size
boost::filesystem::ifstream nodes_input_stream(nodes_data_path, std::ios::binary);
unsigned coordinate_list_size = 0;
nodes_input_stream.read((char *)&coordinate_list_size, sizeof(unsigned));
shared_layout_ptr->SetBlockSize<FixedPointCoordinate>(SharedDataLayout::COORDINATE_LIST,
coordinate_list_size);
// load geometries sizes
std::ifstream geometry_input_stream(geometries_data_path.string().c_str(),
std::ios::binary);
unsigned number_of_geometries_indices = 0;
unsigned number_of_compressed_geometries = 0;
geometry_input_stream.read((char *)&number_of_geometries_indices, sizeof(unsigned));
shared_layout_ptr->SetBlockSize<unsigned>(SharedDataLayout::GEOMETRIES_INDEX,
number_of_geometries_indices);
boost::iostreams::seek(geometry_input_stream,
number_of_geometries_indices * sizeof(unsigned), BOOST_IOS::cur);
geometry_input_stream.read((char *)&number_of_compressed_geometries, sizeof(unsigned));
shared_layout_ptr->SetBlockSize<unsigned>(SharedDataLayout::GEOMETRIES_LIST,
number_of_compressed_geometries);
// allocate shared memory block
SimpleLogger().Write() << "allocating shared memory of "
<< shared_layout_ptr->GetSizeOfLayout() << " bytes";
SharedMemory *shared_memory =
SharedMemoryFactory::Get(data_region, shared_layout_ptr->GetSizeOfLayout());
char *shared_memory_ptr = static_cast<char *>(shared_memory->Ptr());
// read actual data into shared memory object //
// hsgr checksum
unsigned *checksum_ptr = shared_layout_ptr->GetBlockPtr<unsigned, true>(
shared_memory_ptr, SharedDataLayout::HSGR_CHECKSUM);
*checksum_ptr = checksum;
// ram index file name
char *file_index_path_ptr = shared_layout_ptr->GetBlockPtr<char, true>(
shared_memory_ptr, SharedDataLayout::FILE_INDEX_PATH);
// make sure we have 0 ending
std::fill(file_index_path_ptr,
file_index_path_ptr +
shared_layout_ptr->GetBlockSize(SharedDataLayout::FILE_INDEX_PATH),
0);
std::copy(file_index_path.begin(), file_index_path.end(), file_index_path_ptr);
// Loading street names
unsigned *name_offsets_ptr = shared_layout_ptr->GetBlockPtr<unsigned, true>(
shared_memory_ptr, SharedDataLayout::NAME_OFFSETS);
if (shared_layout_ptr->GetBlockSize(SharedDataLayout::NAME_OFFSETS) > 0)
{
name_stream.read((char *)name_offsets_ptr,
shared_layout_ptr->GetBlockSize(SharedDataLayout::NAME_OFFSETS));
}
unsigned *name_blocks_ptr = shared_layout_ptr->GetBlockPtr<unsigned, true>(
shared_memory_ptr, SharedDataLayout::NAME_BLOCKS);
if (shared_layout_ptr->GetBlockSize(SharedDataLayout::NAME_BLOCKS) > 0)
{
name_stream.read((char *)name_blocks_ptr,
shared_layout_ptr->GetBlockSize(SharedDataLayout::NAME_BLOCKS));
}
char *name_char_ptr = shared_layout_ptr->GetBlockPtr<char, true>(
shared_memory_ptr, SharedDataLayout::NAME_CHAR_LIST);
unsigned temp_length;
name_stream.read((char *)&temp_length, sizeof(unsigned));
BOOST_ASSERT_MSG(temp_length ==
shared_layout_ptr->GetBlockSize(SharedDataLayout::NAME_CHAR_LIST),
"Name file corrupted!");
if (shared_layout_ptr->GetBlockSize(SharedDataLayout::NAME_CHAR_LIST) > 0)
{
name_stream.read(name_char_ptr,
shared_layout_ptr->GetBlockSize(SharedDataLayout::NAME_CHAR_LIST));
}
name_stream.close();
// load original edge information
NodeID *via_node_ptr = shared_layout_ptr->GetBlockPtr<NodeID, true>(
shared_memory_ptr, SharedDataLayout::VIA_NODE_LIST);
unsigned *name_id_ptr = shared_layout_ptr->GetBlockPtr<unsigned, true>(
shared_memory_ptr, SharedDataLayout::NAME_ID_LIST);
TravelMode *travel_mode_ptr = shared_layout_ptr->GetBlockPtr<TravelMode, true>(
shared_memory_ptr, SharedDataLayout::TRAVEL_MODE);
TurnInstruction *turn_instructions_ptr =
shared_layout_ptr->GetBlockPtr<TurnInstruction, true>(
shared_memory_ptr, SharedDataLayout::TURN_INSTRUCTION);
unsigned *geometries_indicator_ptr = shared_layout_ptr->GetBlockPtr<unsigned, true>(
shared_memory_ptr, SharedDataLayout::GEOMETRIES_INDICATORS);
OriginalEdgeData current_edge_data;
for (unsigned i = 0; i < number_of_original_edges; ++i)
{
edges_input_stream.read((char *)&(current_edge_data), sizeof(OriginalEdgeData));
via_node_ptr[i] = current_edge_data.via_node;
name_id_ptr[i] = current_edge_data.name_id;
travel_mode_ptr[i] = current_edge_data.travel_mode;
turn_instructions_ptr[i] = current_edge_data.turn_instruction;
const unsigned bucket = i / 32; const unsigned bucket = i / 32;
const unsigned offset = i % 32; const unsigned offset = i % 32;
const unsigned value = [&] const unsigned value = [&]
@ -455,154 +531,68 @@ int main(const int argc, const char *argv[])
unsigned return_value = 0; unsigned return_value = 0;
if (0 != offset) if (0 != offset)
{ {
return_value = geometries_indicator_ptr[bucket]; return_value = core_marker_ptr[bucket];
} }
return return_value; return return_value;
}(); }();
if (current_edge_data.compressed_geometry)
{ core_marker_ptr[bucket] = (value | (1 << offset));
geometries_indicator_ptr[bucket] = (value | (1 << offset));
}
} }
edges_input_stream.close();
// load compressed geometry
unsigned temporary_value;
unsigned *geometries_index_ptr = shared_layout_ptr->GetBlockPtr<unsigned, true>(
shared_memory_ptr, SharedDataLayout::GEOMETRIES_INDEX);
geometry_input_stream.seekg(0, geometry_input_stream.beg);
geometry_input_stream.read((char *)&temporary_value, sizeof(unsigned));
BOOST_ASSERT(temporary_value ==
shared_layout_ptr->num_entries[SharedDataLayout::GEOMETRIES_INDEX]);
if (shared_layout_ptr->GetBlockSize(SharedDataLayout::GEOMETRIES_INDEX) > 0)
{
geometry_input_stream.read(
(char *)geometries_index_ptr,
shared_layout_ptr->GetBlockSize(SharedDataLayout::GEOMETRIES_INDEX));
}
unsigned *geometries_list_ptr = shared_layout_ptr->GetBlockPtr<unsigned, true>(
shared_memory_ptr, SharedDataLayout::GEOMETRIES_LIST);
geometry_input_stream.read((char *)&temporary_value, sizeof(unsigned));
BOOST_ASSERT(temporary_value ==
shared_layout_ptr->num_entries[SharedDataLayout::GEOMETRIES_LIST]);
if (shared_layout_ptr->GetBlockSize(SharedDataLayout::GEOMETRIES_LIST) > 0)
{
geometry_input_stream.read(
(char *)geometries_list_ptr,
shared_layout_ptr->GetBlockSize(SharedDataLayout::GEOMETRIES_LIST));
}
// Loading list of coordinates
FixedPointCoordinate *coordinates_ptr =
shared_layout_ptr->GetBlockPtr<FixedPointCoordinate, true>(
shared_memory_ptr, SharedDataLayout::COORDINATE_LIST);
QueryNode current_node;
for (unsigned i = 0; i < coordinate_list_size; ++i)
{
nodes_input_stream.read((char *)&current_node, sizeof(QueryNode));
coordinates_ptr[i] = FixedPointCoordinate(current_node.lat, current_node.lon);
}
nodes_input_stream.close();
// store timestamp
char *timestamp_ptr = shared_layout_ptr->GetBlockPtr<char, true>(
shared_memory_ptr, SharedDataLayout::TIMESTAMP);
std::copy(m_timestamp.c_str(), m_timestamp.c_str() + m_timestamp.length(), timestamp_ptr);
// store search tree portion of rtree
char *rtree_ptr = shared_layout_ptr->GetBlockPtr<char, true>(
shared_memory_ptr, SharedDataLayout::R_SEARCH_TREE);
if (tree_size > 0)
{
tree_node_file.read(rtree_ptr, sizeof(RTreeNode) * tree_size);
}
tree_node_file.close();
// load core markers
std::vector<char> unpacked_core_markers(number_of_core_markers);
core_marker_file.read((char *)unpacked_core_markers.data(), sizeof(char)*number_of_core_markers);
unsigned *core_marker_ptr = shared_layout_ptr->GetBlockPtr<unsigned, true>(
shared_memory_ptr, SharedDataLayout::CORE_MARKER);
for (auto i = 0u; i < number_of_core_markers; ++i)
{
BOOST_ASSERT(unpacked_core_markers[i] == 0 || unpacked_core_markers[i] == 1);
if (unpacked_core_markers[i] == 1)
{
const unsigned bucket = i / 32;
const unsigned offset = i % 32;
const unsigned value = [&]
{
unsigned return_value = 0;
if (0 != offset)
{
return_value = core_marker_ptr[bucket];
}
return return_value;
}();
core_marker_ptr[bucket] = (value | (1 << offset));
}
}
// load the nodes of the search graph
QueryGraph::NodeArrayEntry *graph_node_list_ptr =
shared_layout_ptr->GetBlockPtr<QueryGraph::NodeArrayEntry, true>(
shared_memory_ptr, SharedDataLayout::GRAPH_NODE_LIST);
if (shared_layout_ptr->GetBlockSize(SharedDataLayout::GRAPH_NODE_LIST) > 0)
{
hsgr_input_stream.read(
(char *)graph_node_list_ptr,
shared_layout_ptr->GetBlockSize(SharedDataLayout::GRAPH_NODE_LIST));
}
// load the edges of the search graph
QueryGraph::EdgeArrayEntry *graph_edge_list_ptr =
shared_layout_ptr->GetBlockPtr<QueryGraph::EdgeArrayEntry, true>(
shared_memory_ptr, SharedDataLayout::GRAPH_EDGE_LIST);
if (shared_layout_ptr->GetBlockSize(SharedDataLayout::GRAPH_EDGE_LIST) > 0)
{
hsgr_input_stream.read(
(char *)graph_edge_list_ptr,
shared_layout_ptr->GetBlockSize(SharedDataLayout::GRAPH_EDGE_LIST));
}
hsgr_input_stream.close();
// acquire lock
SharedMemory *data_type_memory =
SharedMemoryFactory::Get(CURRENT_REGIONS, sizeof(SharedDataTimestamp), true, false);
SharedDataTimestamp *data_timestamp_ptr =
static_cast<SharedDataTimestamp *>(data_type_memory->Ptr());
boost::interprocess::scoped_lock<boost::interprocess::named_mutex> query_lock(
barrier.query_mutex);
// notify all processes that were waiting for this condition
if (0 < barrier.number_of_queries)
{
barrier.no_running_queries_condition.wait(query_lock);
}
data_timestamp_ptr->layout = layout_region;
data_timestamp_ptr->data = data_region;
data_timestamp_ptr->timestamp += 1;
delete_region(previous_data_region);
delete_region(previous_layout_region);
SimpleLogger().Write() << "all data loaded";
shared_layout_ptr->PrintInformation();
} }
catch (const std::exception &e)
// load the nodes of the search graph
QueryGraph::NodeArrayEntry *graph_node_list_ptr =
shared_layout_ptr->GetBlockPtr<QueryGraph::NodeArrayEntry, true>(
shared_memory_ptr, SharedDataLayout::GRAPH_NODE_LIST);
if (shared_layout_ptr->GetBlockSize(SharedDataLayout::GRAPH_NODE_LIST) > 0)
{ {
SimpleLogger().Write(logWARNING) << "caught exception: " << e.what(); hsgr_input_stream.read((char *)graph_node_list_ptr,
shared_layout_ptr->GetBlockSize(SharedDataLayout::GRAPH_NODE_LIST));
} }
return 0; // load the edges of the search graph
QueryGraph::EdgeArrayEntry *graph_edge_list_ptr =
shared_layout_ptr->GetBlockPtr<QueryGraph::EdgeArrayEntry, true>(
shared_memory_ptr, SharedDataLayout::GRAPH_EDGE_LIST);
if (shared_layout_ptr->GetBlockSize(SharedDataLayout::GRAPH_EDGE_LIST) > 0)
{
hsgr_input_stream.read((char *)graph_edge_list_ptr,
shared_layout_ptr->GetBlockSize(SharedDataLayout::GRAPH_EDGE_LIST));
}
hsgr_input_stream.close();
// acquire lock
SharedMemory *data_type_memory =
SharedMemoryFactory::Get(CURRENT_REGIONS, sizeof(SharedDataTimestamp), true, false);
SharedDataTimestamp *data_timestamp_ptr =
static_cast<SharedDataTimestamp *>(data_type_memory->Ptr());
boost::interprocess::scoped_lock<boost::interprocess::named_mutex> query_lock(
barrier.query_mutex);
// notify all processes that were waiting for this condition
if (0 < barrier.number_of_queries)
{
barrier.no_running_queries_condition.wait(query_lock);
}
data_timestamp_ptr->layout = layout_region;
data_timestamp_ptr->data = data_region;
data_timestamp_ptr->timestamp += 1;
delete_region(previous_data_region);
delete_region(previous_layout_region);
SimpleLogger().Write() << "all data loaded";
shared_layout_ptr->PrintInformation();
}
catch (const std::bad_alloc &e)
{
SimpleLogger().Write(logWARNING) << "[exception] " << e.what();
SimpleLogger().Write(logWARNING) << "Please provide more memory or disable locking the virtual "
"address space (note: this makes OSRM swap, i.e. slow)";
return EXIT_FAILURE;
}
catch (const std::exception &e)
{
SimpleLogger().Write(logWARNING) << "caught exception: " << e.what();
} }

View File

@ -127,7 +127,7 @@ void DescriptionFactory::Run(const unsigned zoom_level)
{ {
// move down names by one, q&d hack // move down names by one, q&d hack
path_description[i - 1].name_id = path_description[i].name_id; path_description[i - 1].name_id = path_description[i].name_id;
path_description[i].length = coordinate_calculation::euclidean_distance( path_description[i].length = coordinate_calculation::great_circle_distance(
path_description[i - 1].location, path_description[i].location); path_description[i - 1].location, path_description[i].location);
} }
@ -230,18 +230,20 @@ void DescriptionFactory::Run(const unsigned zoom_level)
return; return;
} }
++necessary_segments;
if (first.is_via_location) if (first.is_via_location)
{ // mark the end of a leg (of several segments) { // mark the end of a leg (of several segments)
via_indices.push_back(necessary_segments); via_indices.push_back(necessary_segments);
} }
const double angle = coordinate_calculation::bearing(first.location, second.location); const double post_turn_bearing = coordinate_calculation::bearing(first.location, second.location);
first.bearing = static_cast<short>(angle * 10); const double pre_turn_bearing = coordinate_calculation::bearing(second.location, first.location);
first.post_turn_bearing = static_cast<short>(post_turn_bearing * 10);
first.pre_turn_bearing = static_cast<short>(pre_turn_bearing * 10);
++necessary_segments;
}); });
via_indices.push_back(necessary_segments + 1); via_indices.push_back(necessary_segments);
BOOST_ASSERT(via_indices.size() >= 2); BOOST_ASSERT(via_indices.size() >= 2);
return; return;
} }

View File

@ -35,6 +35,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../data_structures/segment_information.hpp" #include "../data_structures/segment_information.hpp"
#include "../data_structures/turn_instructions.hpp" #include "../data_structures/turn_instructions.hpp"
#include "../util/bearing.hpp" #include "../util/bearing.hpp"
#include "../util/cast.hpp"
#include "../util/integer_range.hpp" #include "../util/integer_range.hpp"
#include "../util/json_renderer.hpp" #include "../util/json_renderer.hpp"
#include "../util/simple_logger.hpp" #include "../util/simple_logger.hpp"
@ -43,7 +44,9 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <osrm/json_container.hpp> #include <osrm/json_container.hpp>
#include <limits>
#include <algorithm> #include <algorithm>
#include <string>
template <class DataFacadeT> class JSONDescriptor final : public BaseDescriptor<DataFacadeT> template <class DataFacadeT> class JSONDescriptor final : public BaseDescriptor<DataFacadeT>
{ {
@ -100,9 +103,6 @@ template <class DataFacadeT> class JSONDescriptor final : public BaseDescriptor<
if (INVALID_EDGE_WEIGHT == raw_route.shortest_path_length) if (INVALID_EDGE_WEIGHT == raw_route.shortest_path_length)
{ {
// We do not need to do much, if there is no route ;-) // We do not need to do much, if there is no route ;-)
json_result.values["status"] = 207;
json_result.values["status_message"] = "Cannot find route between points";
// osrm::json::render(reply.content, json_result);
return; return;
} }
@ -113,8 +113,6 @@ template <class DataFacadeT> class JSONDescriptor final : public BaseDescriptor<
description_factory.SetStartSegment( description_factory.SetStartSegment(
raw_route.segment_end_coordinates.front().source_phantom, raw_route.segment_end_coordinates.front().source_phantom,
raw_route.source_traversed_in_reverse.front()); raw_route.source_traversed_in_reverse.front());
json_result.values["status"] = 0;
json_result.values["status_message"] = "Found route between points";
// for each unpacked segment add the leg to the description // for each unpacked segment add the leg to the description
for (const auto i : osrm::irange<std::size_t>(0, raw_route.unpacked_path_segments.size())) for (const auto i : osrm::irange<std::size_t>(0, raw_route.unpacked_path_segments.size()))
@ -293,14 +291,13 @@ template <class DataFacadeT> class JSONDescriptor final : public BaseDescriptor<
std::vector<Segment> &route_segments_list) const std::vector<Segment> &route_segments_list) const
{ {
osrm::json::Array json_instruction_array; osrm::json::Array json_instruction_array;
// Segment information has following format: // Segment information has following format:
//["instruction id","streetname",length,position,time,"length","earth_direction",azimuth] //["instruction id","streetname",length,position,time,"length","earth_direction",azimuth]
unsigned necessary_segments_running_index = 0; unsigned necessary_segments_running_index = 0;
struct RoundAbout struct RoundAbout
{ {
RoundAbout() : start_index(INT_MAX), name_id(INVALID_NAMEID), leave_at_exit(INT_MAX) {} RoundAbout() : start_index(std::numeric_limits<int>::max()), name_id(INVALID_NAMEID), leave_at_exit(std::numeric_limits<int>::max()) {}
int start_index; int start_index;
unsigned name_id; unsigned name_id;
int leave_at_exit; int leave_at_exit;
@ -327,18 +324,18 @@ template <class DataFacadeT> class JSONDescriptor final : public BaseDescriptor<
std::string current_turn_instruction; std::string current_turn_instruction;
if (TurnInstruction::LeaveRoundAbout == current_instruction) if (TurnInstruction::LeaveRoundAbout == current_instruction)
{ {
temp_instruction = cast::integral_to_string( temp_instruction = std::to_string(
cast::enum_to_underlying(TurnInstruction::EnterRoundAbout)); cast::enum_to_underlying(TurnInstruction::EnterRoundAbout));
current_turn_instruction += temp_instruction; current_turn_instruction += temp_instruction;
current_turn_instruction += "-"; current_turn_instruction += "-";
temp_instruction = cast::integral_to_string(round_about.leave_at_exit + 1); temp_instruction = std::to_string(round_about.leave_at_exit + 1);
current_turn_instruction += temp_instruction; current_turn_instruction += temp_instruction;
round_about.leave_at_exit = 0; round_about.leave_at_exit = 0;
} }
else else
{ {
temp_instruction = temp_instruction =
cast::integral_to_string(cast::enum_to_underlying(current_instruction)); std::to_string(cast::enum_to_underlying(current_instruction));
current_turn_instruction += temp_instruction; current_turn_instruction += temp_instruction;
} }
json_instruction_row.values.push_back(current_turn_instruction); json_instruction_row.values.push_back(current_turn_instruction);
@ -348,17 +345,27 @@ template <class DataFacadeT> class JSONDescriptor final : public BaseDescriptor<
json_instruction_row.values.push_back(necessary_segments_running_index); json_instruction_row.values.push_back(necessary_segments_running_index);
json_instruction_row.values.push_back(std::round(segment.duration / 10.)); json_instruction_row.values.push_back(std::round(segment.duration / 10.));
json_instruction_row.values.push_back( json_instruction_row.values.push_back(
cast::integral_to_string(static_cast<unsigned>(segment.length)) + "m"); std::to_string(static_cast<unsigned>(segment.length)) + "m");
const double bearing_value = (segment.bearing / 10.);
json_instruction_row.values.push_back(bearing::get(bearing_value)); // post turn bearing
const double post_turn_bearing_value = (segment.post_turn_bearing / 10.);
json_instruction_row.values.push_back(bearing::get(post_turn_bearing_value));
json_instruction_row.values.push_back( json_instruction_row.values.push_back(
static_cast<unsigned>(round(bearing_value))); static_cast<unsigned>(round(post_turn_bearing_value)));
json_instruction_row.values.push_back(segment.travel_mode); json_instruction_row.values.push_back(segment.travel_mode);
// pre turn bearing
const double pre_turn_bearing_value = (segment.pre_turn_bearing / 10.);
json_instruction_row.values.push_back(bearing::get(pre_turn_bearing_value));
json_instruction_row.values.push_back(
static_cast<unsigned>(round(pre_turn_bearing_value)));
json_instruction_array.values.push_back(json_instruction_row);
route_segments_list.emplace_back( route_segments_list.emplace_back(
segment.name_id, static_cast<int>(segment.length), segment.name_id, static_cast<int>(segment.length),
static_cast<unsigned>(route_segments_list.size())); static_cast<unsigned>(route_segments_list.size()));
json_instruction_array.values.push_back(json_instruction_row);
} }
} }
else if (TurnInstruction::StayOnRoundAbout == current_instruction) else if (TurnInstruction::StayOnRoundAbout == current_instruction)
@ -372,8 +379,8 @@ template <class DataFacadeT> class JSONDescriptor final : public BaseDescriptor<
} }
osrm::json::Array json_last_instruction_row; osrm::json::Array json_last_instruction_row;
temp_instruction = cast::integral_to_string( temp_instruction =
cast::enum_to_underlying(TurnInstruction::ReachedYourDestination)); std::to_string(cast::enum_to_underlying(TurnInstruction::ReachedYourDestination));
json_last_instruction_row.values.push_back(temp_instruction); json_last_instruction_row.values.push_back(temp_instruction);
json_last_instruction_row.values.push_back(""); json_last_instruction_row.values.push_back("");
json_last_instruction_row.values.push_back(0); json_last_instruction_row.values.push_back(0);
@ -382,6 +389,8 @@ template <class DataFacadeT> class JSONDescriptor final : public BaseDescriptor<
json_last_instruction_row.values.push_back("0m"); json_last_instruction_row.values.push_back("0m");
json_last_instruction_row.values.push_back(bearing::get(0.0)); json_last_instruction_row.values.push_back(bearing::get(0.0));
json_last_instruction_row.values.push_back(0.); json_last_instruction_row.values.push_back(0.);
json_last_instruction_row.values.push_back(bearing::get(0.0));
json_last_instruction_row.values.push_back(0.);
json_instruction_array.values.push_back(json_last_instruction_row); json_instruction_array.values.push_back(json_last_instruction_row);
return json_instruction_array; return json_instruction_array;

View File

@ -31,53 +31,59 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <boost/filesystem.hpp> #include <boost/filesystem.hpp>
#include <cstdlib>
#include <exception> #include <exception>
#include <new>
int main(int argc, char *argv[]) int main(int argc, char *argv[]) try
{ {
try LogPolicy::GetInstance().Unmute();
ExtractorConfig extractor_config;
const return_code result = ExtractorOptions::ParseArguments(argc, argv, extractor_config);
if (return_code::fail == result)
{ {
LogPolicy::GetInstance().Unmute(); return EXIT_FAILURE;
ExtractorConfig extractor_config;
const return_code result = ExtractorOptions::ParseArguments(argc, argv, extractor_config);
if (return_code::fail == result)
{
return 1;
}
if (return_code::exit == result)
{
return 0;
}
ExtractorOptions::GenerateOutputFilesNames(extractor_config);
if (1 > extractor_config.requested_num_threads)
{
SimpleLogger().Write(logWARNING) << "Number of threads must be 1 or larger";
return 1;
}
if (!boost::filesystem::is_regular_file(extractor_config.input_path))
{
SimpleLogger().Write(logWARNING)
<< "Input file " << extractor_config.input_path.string() << " not found!";
return 1;
}
if (!boost::filesystem::is_regular_file(extractor_config.profile_path))
{
SimpleLogger().Write(logWARNING) << "Profile " << extractor_config.profile_path.string()
<< " not found!";
return 1;
}
return extractor(extractor_config).run();
} }
catch (const std::exception &e)
if (return_code::exit == result)
{ {
SimpleLogger().Write(logWARNING) << "[exception] " << e.what(); return EXIT_SUCCESS;
return 1;
} }
ExtractorOptions::GenerateOutputFilesNames(extractor_config);
if (1 > extractor_config.requested_num_threads)
{
SimpleLogger().Write(logWARNING) << "Number of threads must be 1 or larger";
return EXIT_FAILURE;
}
if (!boost::filesystem::is_regular_file(extractor_config.input_path))
{
SimpleLogger().Write(logWARNING) << "Input file " << extractor_config.input_path.string()
<< " not found!";
return EXIT_FAILURE;
}
if (!boost::filesystem::is_regular_file(extractor_config.profile_path))
{
SimpleLogger().Write(logWARNING) << "Profile " << extractor_config.profile_path.string()
<< " not found!";
return EXIT_FAILURE;
}
return extractor(extractor_config).run();
}
catch (const std::bad_alloc &e)
{
SimpleLogger().Write(logWARNING) << "[exception] " << e.what();
SimpleLogger().Write(logWARNING)
<< "Please provide more memory or consider using a larger swapfile";
return EXIT_FAILURE;
}
catch (const std::exception &e)
{
SimpleLogger().Write(logWARNING) << "[exception] " << e.what();
return EXIT_FAILURE;
} }

View File

@ -26,12 +26,16 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#include "edge_based_graph_factory.hpp" #include "edge_based_graph_factory.hpp"
#include "../algorithms/coordinate_calculation.hpp"
#include "../data_structures/percent.hpp" #include "../data_structures/percent.hpp"
#include "../util/compute_angle.hpp" #include "../util/compute_angle.hpp"
#include "../util/integer_range.hpp" #include "../util/integer_range.hpp"
#include "../util/lua_util.hpp" #include "../util/lua_util.hpp"
#include "../util/simple_logger.hpp" #include "../util/simple_logger.hpp"
#include "../util/timing_util.hpp" #include "../util/timing_util.hpp"
#include "../util/osrm_exception.hpp"
#include "../util/debug_geometry.hpp"
#include <boost/assert.hpp> #include <boost/assert.hpp>
@ -57,7 +61,8 @@ EdgeBasedGraphFactory::EdgeBasedGraphFactory(
void EdgeBasedGraphFactory::GetEdgeBasedEdges(DeallocatingVector<EdgeBasedEdge> &output_edge_list) void EdgeBasedGraphFactory::GetEdgeBasedEdges(DeallocatingVector<EdgeBasedEdge> &output_edge_list)
{ {
BOOST_ASSERT_MSG(0 == output_edge_list.size(), "Vector is not empty"); BOOST_ASSERT_MSG(0 == output_edge_list.size(), "Vector is not empty");
m_edge_based_edge_list.swap(output_edge_list); using std::swap; // Koenig swap
swap(m_edge_based_edge_list, output_edge_list);
} }
void EdgeBasedGraphFactory::GetEdgeBasedNodes(std::vector<EdgeBasedNode> &nodes) void EdgeBasedGraphFactory::GetEdgeBasedNodes(std::vector<EdgeBasedNode> &nodes)
@ -71,7 +76,14 @@ void EdgeBasedGraphFactory::GetEdgeBasedNodes(std::vector<EdgeBasedNode> &nodes)
BOOST_ASSERT(m_node_info_list.at(node.v).lat != INT_MAX); BOOST_ASSERT(m_node_info_list.at(node.v).lat != INT_MAX);
} }
#endif #endif
nodes.swap(m_edge_based_node_list); using std::swap; // Koenig swap
swap(nodes, m_edge_based_node_list);
}
void EdgeBasedGraphFactory::GetStartPointMarkers(std::vector<bool> &node_is_startpoint)
{
using std::swap; // Koenig swap
swap(m_edge_based_node_is_startpoint, node_is_startpoint);
} }
unsigned EdgeBasedGraphFactory::GetHighestEdgeID() unsigned EdgeBasedGraphFactory::GetHighestEdgeID()
@ -160,7 +172,8 @@ void EdgeBasedGraphFactory::InsertEdgeBasedNode(const NodeID node_u,
forward_data.name_id, forward_geometry[i].second, forward_data.name_id, forward_geometry[i].second,
reverse_geometry[geometry_size - 1 - i].second, forward_dist_prefix_sum[i], reverse_geometry[geometry_size - 1 - i].second, forward_dist_prefix_sum[i],
reverse_dist_prefix_sum[i], m_compressed_edge_container.GetPositionForID(edge_id_1), reverse_dist_prefix_sum[i], m_compressed_edge_container.GetPositionForID(edge_id_1),
INVALID_COMPONENTID, i, forward_data.travel_mode, reverse_data.travel_mode); false, INVALID_COMPONENTID, i, forward_data.travel_mode, reverse_data.travel_mode);
m_edge_based_node_is_startpoint.push_back(forward_data.startpoint || reverse_data.startpoint);
current_edge_source_coordinate_id = current_edge_target_coordinate_id; current_edge_source_coordinate_id = current_edge_target_coordinate_id;
BOOST_ASSERT(m_edge_based_node_list.back().IsCompressed()); BOOST_ASSERT(m_edge_based_node_list.back().IsCompressed());
@ -203,7 +216,8 @@ void EdgeBasedGraphFactory::InsertEdgeBasedNode(const NodeID node_u,
m_edge_based_node_list.emplace_back( m_edge_based_node_list.emplace_back(
forward_data.edge_id, reverse_data.edge_id, node_u, node_v, forward_data.edge_id, reverse_data.edge_id, node_u, node_v,
forward_data.name_id, forward_data.distance, reverse_data.distance, 0, 0, SPECIAL_EDGEID, forward_data.name_id, forward_data.distance, reverse_data.distance, 0, 0, SPECIAL_EDGEID,
INVALID_COMPONENTID, 0, forward_data.travel_mode, reverse_data.travel_mode); false, INVALID_COMPONENTID, 0, forward_data.travel_mode, reverse_data.travel_mode);
m_edge_based_node_is_startpoint.push_back(forward_data.startpoint || reverse_data.startpoint);
BOOST_ASSERT(!m_edge_based_node_list.back().IsCompressed()); BOOST_ASSERT(!m_edge_based_node_list.back().IsCompressed());
} }
} }
@ -220,8 +234,20 @@ void EdgeBasedGraphFactory::FlushVectorToStream(
original_edge_data_vector.clear(); original_edge_data_vector.clear();
} }
#ifdef DEBUG_GEOMETRY
void EdgeBasedGraphFactory::Run(const std::string &original_edge_data_filename, void EdgeBasedGraphFactory::Run(const std::string &original_edge_data_filename,
lua_State *lua_state) lua_State *lua_state,
const std::string &edge_segment_lookup_filename,
const std::string &edge_penalty_filename,
const bool generate_edge_lookup,
const std::string &debug_turns_path)
#else
void EdgeBasedGraphFactory::Run(const std::string &original_edge_data_filename,
lua_State *lua_state,
const std::string &edge_segment_lookup_filename,
const std::string &edge_penalty_filename,
const bool generate_edge_lookup)
#endif
{ {
TIMER_START(renumber); TIMER_START(renumber);
m_max_edge_id = RenumberEdges() - 1; m_max_edge_id = RenumberEdges() - 1;
@ -232,7 +258,16 @@ void EdgeBasedGraphFactory::Run(const std::string &original_edge_data_filename,
TIMER_STOP(generate_nodes); TIMER_STOP(generate_nodes);
TIMER_START(generate_edges); TIMER_START(generate_edges);
GenerateEdgeExpandedEdges(original_edge_data_filename, lua_state); #ifdef DEBUG_GEOMETRY
GenerateEdgeExpandedEdges(original_edge_data_filename, lua_state,
edge_segment_lookup_filename,edge_penalty_filename,
generate_edge_lookup, debug_turns_path);
#else
GenerateEdgeExpandedEdges(original_edge_data_filename, lua_state,
edge_segment_lookup_filename,edge_penalty_filename,
generate_edge_lookup);
#endif
TIMER_STOP(generate_edges); TIMER_STOP(generate_edges);
SimpleLogger().Write() << "Timing statistics for edge-expanded graph:"; SimpleLogger().Write() << "Timing statistics for edge-expanded graph:";
@ -311,13 +346,27 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedNodes()
} }
} }
BOOST_ASSERT(m_edge_based_node_list.size() == m_edge_based_node_is_startpoint.size());
SimpleLogger().Write() << "Generated " << m_edge_based_node_list.size() SimpleLogger().Write() << "Generated " << m_edge_based_node_list.size()
<< " nodes in edge-expanded graph"; << " nodes in edge-expanded graph";
} }
/// Actually it also generates OriginalEdgeData and serializes them... /// Actually it also generates OriginalEdgeData and serializes them...
#ifdef DEBUG_GEOMETRY
void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges( void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges(
const std::string &original_edge_data_filename, lua_State *lua_state) const std::string &original_edge_data_filename, lua_State *lua_state,
const std::string &edge_segment_lookup_filename,
const std::string &edge_fixed_penalties_filename,
const bool generate_edge_lookup,
const std::string &debug_turns_path)
#else
void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges(
const std::string &original_edge_data_filename, lua_State *lua_state,
const std::string &edge_segment_lookup_filename,
const std::string &edge_fixed_penalties_filename,
const bool generate_edge_lookup)
#endif
{ {
SimpleLogger().Write() << "generating edge-expanded edges"; SimpleLogger().Write() << "generating edge-expanded edges";
@ -325,6 +374,14 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges(
unsigned original_edges_counter = 0; unsigned original_edges_counter = 0;
std::ofstream edge_data_file(original_edge_data_filename.c_str(), std::ios::binary); std::ofstream edge_data_file(original_edge_data_filename.c_str(), std::ios::binary);
std::ofstream edge_segment_file;
std::ofstream edge_penalty_file;
if (generate_edge_lookup)
{
edge_segment_file.open(edge_segment_lookup_filename.c_str(), std::ios::binary);
edge_penalty_file.open(edge_fixed_penalties_filename.c_str(), std::ios::binary);
}
// writes a dummy value that is updated later // writes a dummy value that is updated later
edge_data_file.write((char *)&original_edges_counter, sizeof(unsigned)); edge_data_file.write((char *)&original_edges_counter, sizeof(unsigned));
@ -342,9 +399,13 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges(
Percent progress(m_node_based_graph->GetNumberOfNodes()); Percent progress(m_node_based_graph->GetNumberOfNodes());
#ifdef DEBUG_GEOMETRY
DEBUG_TURNS_START(debug_turns_path);
#endif
for (const auto node_u : osrm::irange(0u, m_node_based_graph->GetNumberOfNodes())) for (const auto node_u : osrm::irange(0u, m_node_based_graph->GetNumberOfNodes()))
{ {
progress.printStatus(node_u); //progress.printStatus(node_u);
for (const EdgeID e1 : m_node_based_graph->GetAdjacentEdgeRange(node_u)) for (const EdgeID e1 : m_node_based_graph->GetAdjacentEdgeRange(node_u))
{ {
if (m_node_based_graph->GetEdgeData(e1).reversed) if (m_node_based_graph->GetEdgeData(e1).reversed)
@ -415,6 +476,8 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges(
if (m_traffic_lights.find(node_v) != m_traffic_lights.end()) if (m_traffic_lights.find(node_v) != m_traffic_lights.end())
{ {
distance += speed_profile.traffic_signal_penalty; distance += speed_profile.traffic_signal_penalty;
DEBUG_SIGNAL(node_v, m_node_info_list, speed_profile.traffic_signal_penalty);
} }
// unpack last node of first segment if packed // unpack last node of first segment if packed
@ -437,7 +500,12 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges(
if (turn_instruction == TurnInstruction::UTurn) if (turn_instruction == TurnInstruction::UTurn)
{ {
distance += speed_profile.u_turn_penalty; distance += speed_profile.u_turn_penalty;
}
DEBUG_UTURN(node_v, m_node_info_list, speed_profile.u_turn_penalty);
}
DEBUG_TURN(node_v, m_node_info_list, first_coordinate, turn_angle, turn_penalty);
distance += turn_penalty; distance += turn_penalty;
const bool edge_is_compressed = m_compressed_edge_container.HasEntryForID(e1); const bool edge_is_compressed = m_compressed_edge_container.HasEntryForID(e1);
@ -462,11 +530,70 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges(
BOOST_ASSERT(SPECIAL_NODEID != edge_data1.edge_id); BOOST_ASSERT(SPECIAL_NODEID != edge_data1.edge_id);
BOOST_ASSERT(SPECIAL_NODEID != edge_data2.edge_id); BOOST_ASSERT(SPECIAL_NODEID != edge_data2.edge_id);
// NOTE: potential overflow here if we hit 2^32 routable edges
BOOST_ASSERT(m_edge_based_edge_list.size() <= std::numeric_limits<NodeID>::max());
m_edge_based_edge_list.emplace_back(edge_data1.edge_id, edge_data2.edge_id, m_edge_based_edge_list.emplace_back(edge_data1.edge_id, edge_data2.edge_id,
m_edge_based_edge_list.size(), distance, true, false); m_edge_based_edge_list.size(), distance, true, false);
// Here is where we write out the mapping between the edge-expanded edges, and
// the node-based edges that are originally used to calculate the `distance`
// for the edge-expanded edges. About 40 lines back, there is:
//
// unsigned distance = edge_data1.distance;
//
// This tells us that the weight for an edge-expanded-edge is based on the weight
// of the *source* node-based edge. Therefore, we will look up the individual
// segments of the source node-based edge, and write out a mapping between
// those and the edge-based-edge ID.
// External programs can then use this mapping to quickly perform
// updates to the edge-expanded-edge based directly on its ID.
if (generate_edge_lookup)
{
unsigned fixed_penalty = distance - edge_data1.distance;
edge_penalty_file.write(reinterpret_cast<const char *>(&fixed_penalty), sizeof(fixed_penalty));
if (edge_is_compressed)
{
const auto node_based_edges = m_compressed_edge_container.GetBucketReference(e1);
NodeID previous = node_u;
const unsigned node_count = node_based_edges.size()+1;
edge_segment_file.write(reinterpret_cast<const char *>(&node_count), sizeof(node_count));
const QueryNode &first_node = m_node_info_list[previous];
edge_segment_file.write(reinterpret_cast<const char *>(&first_node.node_id), sizeof(first_node.node_id));
for (auto target_node : node_based_edges)
{
const QueryNode &from = m_node_info_list[previous];
const QueryNode &to = m_node_info_list[target_node.first];
const double segment_length = coordinate_calculation::great_circle_distance(from.lat, from.lon, to.lat, to.lon);
edge_segment_file.write(reinterpret_cast<const char *>(&to.node_id), sizeof(to.node_id));
edge_segment_file.write(reinterpret_cast<const char *>(&segment_length), sizeof(segment_length));
edge_segment_file.write(reinterpret_cast<const char *>(&target_node.second), sizeof(target_node.second));
previous = target_node.first;
}
}
else
{
static const unsigned node_count = 2;
const QueryNode from = m_node_info_list[node_u];
const QueryNode to = m_node_info_list[node_v];
const double segment_length = coordinate_calculation::great_circle_distance(from.lat, from.lon, to.lat, to.lon);
edge_segment_file.write(reinterpret_cast<const char *>(&node_count), sizeof(node_count));
edge_segment_file.write(reinterpret_cast<const char *>(&from.node_id), sizeof(from.node_id));
edge_segment_file.write(reinterpret_cast<const char *>(&to.node_id), sizeof(to.node_id));
edge_segment_file.write(reinterpret_cast<const char *>(&segment_length), sizeof(segment_length));
edge_segment_file.write(reinterpret_cast<const char *>(&edge_data1.distance), sizeof(edge_data1.distance));
}
}
} }
} }
} }
DEBUG_TURNS_STOP();
FlushVectorToStream(edge_data_file, original_edge_data_vector); FlushVectorToStream(edge_data_file, original_edge_data_vector);
edge_data_file.seekp(std::ios::beg); edge_data_file.seekp(std::ios::beg);

View File

@ -50,6 +50,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <unordered_set> #include <unordered_set>
#include <vector> #include <vector>
#include <boost/filesystem/fstream.hpp>
struct lua_State; struct lua_State;
class EdgeBasedGraphFactory class EdgeBasedGraphFactory
@ -66,12 +68,25 @@ class EdgeBasedGraphFactory
const std::vector<QueryNode> &node_info_list, const std::vector<QueryNode> &node_info_list,
SpeedProfileProperties speed_profile); SpeedProfileProperties speed_profile);
#ifdef DEBUG_GEOMETRY
void Run(const std::string &original_edge_data_filename, void Run(const std::string &original_edge_data_filename,
lua_State *lua_state); lua_State *lua_state,
const std::string &edge_segment_lookup_filename,
const std::string &edge_penalty_filename,
const bool generate_edge_lookup,
const std::string &debug_turns_path);
#else
void Run(const std::string &original_edge_data_filename,
lua_State *lua_state,
const std::string &edge_segment_lookup_filename,
const std::string &edge_penalty_filename,
const bool generate_edge_lookup);
#endif
void GetEdgeBasedEdges(DeallocatingVector<EdgeBasedEdge> &edges); void GetEdgeBasedEdges(DeallocatingVector<EdgeBasedEdge> &edges);
void GetEdgeBasedNodes(std::vector<EdgeBasedNode> &nodes); void GetEdgeBasedNodes(std::vector<EdgeBasedNode> &nodes);
void GetStartPointMarkers(std::vector<bool> &node_is_startpoint);
unsigned GetHighestEdgeID(); unsigned GetHighestEdgeID();
@ -82,6 +97,9 @@ class EdgeBasedGraphFactory
private: private:
using EdgeData = NodeBasedDynamicGraph::EdgeData; using EdgeData = NodeBasedDynamicGraph::EdgeData;
//! maps index from m_edge_based_node_list to ture/false if the node is an entry point to the graph
std::vector<bool> m_edge_based_node_is_startpoint;
//! list of edge based nodes (compressed segments)
std::vector<EdgeBasedNode> m_edge_based_node_list; std::vector<EdgeBasedNode> m_edge_based_node_list;
DeallocatingVector<EdgeBasedEdge> m_edge_based_edge_list; DeallocatingVector<EdgeBasedEdge> m_edge_based_edge_list;
unsigned m_max_edge_id; unsigned m_max_edge_id;
@ -99,8 +117,20 @@ class EdgeBasedGraphFactory
void CompressGeometry(); void CompressGeometry();
unsigned RenumberEdges(); unsigned RenumberEdges();
void GenerateEdgeExpandedNodes(); void GenerateEdgeExpandedNodes();
#ifdef DEBUG_GEOMETRY
void GenerateEdgeExpandedEdges(const std::string &original_edge_data_filename, void GenerateEdgeExpandedEdges(const std::string &original_edge_data_filename,
lua_State *lua_state); lua_State *lua_state,
const std::string &edge_segment_lookup_filename,
const std::string &edge_fixed_penalties_filename,
const bool generate_edge_lookup,
const std::string &debug_turns_path);
#else
void GenerateEdgeExpandedEdges(const std::string &original_edge_data_filename,
lua_State *lua_state,
const std::string &edge_segment_lookup_filename,
const std::string &edge_fixed_penalties_filename,
const bool generate_edge_lookup);
#endif
void InsertEdgeBasedNode(const NodeID u, const NodeID v); void InsertEdgeBasedNode(const NodeID u, const NodeID v);

View File

@ -42,6 +42,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <boost/filesystem.hpp> #include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp> #include <boost/filesystem/fstream.hpp>
#include <boost/ref.hpp> #include <boost/ref.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <luabind/luabind.hpp> #include <luabind/luabind.hpp>
@ -50,11 +51,14 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <chrono> #include <chrono>
#include <limits> #include <limits>
static const int WRITE_BLOCK_BUFFER_SIZE = 8000;
ExtractionContainers::ExtractionContainers() ExtractionContainers::ExtractionContainers()
{ {
// Check if stxxl can be instantiated // Check if stxxl can be instantiated
stxxl::vector<unsigned> dummy_vector; stxxl::vector<unsigned> dummy_vector;
name_list.push_back(""); // Insert the empty string, it has no data and is zero length
name_lengths.push_back(0);
} }
ExtractionContainers::~ExtractionContainers() ExtractionContainers::~ExtractionContainers()
@ -63,7 +67,8 @@ ExtractionContainers::~ExtractionContainers()
used_node_id_list.clear(); used_node_id_list.clear();
all_nodes_list.clear(); all_nodes_list.clear();
all_edges_list.clear(); all_edges_list.clear();
name_list.clear(); name_char_data.clear();
name_lengths.clear();
restrictions_list.clear(); restrictions_list.clear();
way_start_end_id_list.clear(); way_start_end_id_list.clear();
} }
@ -115,13 +120,10 @@ void ExtractionContainers::WriteNames(const std::string& names_file_name) const
boost::filesystem::ofstream name_file_stream(names_file_name, std::ios::binary); boost::filesystem::ofstream name_file_stream(names_file_name, std::ios::binary);
unsigned total_length = 0; unsigned total_length = 0;
std::vector<unsigned> name_lengths;
for (const std::string &temp_string : name_list) for (const unsigned &name_length : name_lengths)
{ {
const unsigned string_length = total_length += name_length;
std::min(static_cast<unsigned>(temp_string.length()), 255u);
name_lengths.push_back(string_length);
total_length += string_length;
} }
// builds and writes the index // builds and writes the index
@ -129,14 +131,25 @@ void ExtractionContainers::WriteNames(const std::string& names_file_name) const
name_file_stream << name_index_range; name_file_stream << name_index_range;
name_file_stream.write((char *)&total_length, sizeof(unsigned)); name_file_stream.write((char *)&total_length, sizeof(unsigned));
// write all chars consecutively // write all chars consecutively
for (const std::string &temp_string : name_list) char write_buffer[WRITE_BLOCK_BUFFER_SIZE];
unsigned buffer_len = 0;
for (const char &c : name_char_data)
{ {
const unsigned string_length = write_buffer[buffer_len++] = c;
std::min(static_cast<unsigned>(temp_string.length()), 255u);
name_file_stream.write(temp_string.c_str(), string_length); if (buffer_len >= WRITE_BLOCK_BUFFER_SIZE)
{
name_file_stream.write(write_buffer, WRITE_BLOCK_BUFFER_SIZE);
buffer_len = 0;
}
} }
name_file_stream.write(write_buffer, buffer_len);
name_file_stream.close(); name_file_stream.close();
TIMER_STOP(write_name_index); TIMER_STOP(write_name_index);
std::cout << "ok, after " << TIMER_SEC(write_name_index) << "s" << std::endl; std::cout << "ok, after " << TIMER_SEC(write_name_index) << "s" << std::endl;
@ -171,7 +184,11 @@ void ExtractionContainers::PrepareNodes()
auto ref_iter = used_node_id_list.begin(); auto ref_iter = used_node_id_list.begin();
const auto all_nodes_list_end = all_nodes_list.end(); const auto all_nodes_list_end = all_nodes_list.end();
const auto used_node_id_list_end = used_node_id_list.end(); const auto used_node_id_list_end = used_node_id_list.end();
auto internal_id = 0u; // Note: despite being able to handle 64 bit OSM node ids, we can't
// handle > uint32_t actual usable nodes. This should be OK for a while
// because we usually route on a *lot* less than 2^32 of the OSM
// graph nodes.
std::size_t internal_id = 0;
// compute the intersection of nodes that were referenced and nodes we actually have // compute the intersection of nodes that were referenced and nodes we actually have
while (node_iter != all_nodes_list_end && ref_iter != used_node_id_list_end) while (node_iter != all_nodes_list_end && ref_iter != used_node_id_list_end)
@ -187,11 +204,15 @@ void ExtractionContainers::PrepareNodes()
continue; continue;
} }
BOOST_ASSERT(node_iter->node_id == *ref_iter); BOOST_ASSERT(node_iter->node_id == *ref_iter);
external_to_internal_node_id_map[*ref_iter] = internal_id++; external_to_internal_node_id_map[*ref_iter] = static_cast<NodeID>(internal_id++);
node_iter++; node_iter++;
ref_iter++; ref_iter++;
} }
max_internal_node_id = internal_id; if (internal_id > std::numeric_limits<NodeID>::max())
{
throw osrm::exception("There are too many nodes remaining after filtering, OSRM only supports 2^32 unique nodes");
}
max_internal_node_id = boost::numeric_cast<NodeID>(internal_id);
TIMER_STOP(id_map); TIMER_STOP(id_map);
std::cout << "ok, after " << TIMER_SEC(id_map) << "s" << std::endl; std::cout << "ok, after " << TIMER_SEC(id_map) << "s" << std::endl;
@ -202,7 +223,7 @@ void ExtractionContainers::PrepareEdges(lua_State *segment_state)
// Sort edges by start. // Sort edges by start.
std::cout << "[extractor] Sorting edges by start ... " << std::flush; std::cout << "[extractor] Sorting edges by start ... " << std::flush;
TIMER_START(sort_edges_by_start); TIMER_START(sort_edges_by_start);
stxxl::sort(all_edges_list.begin(), all_edges_list.end(), CmpEdgeByStartID(), stxxl_memory); stxxl::sort(all_edges_list.begin(), all_edges_list.end(), CmpEdgeByOSMStartID(), stxxl_memory);
TIMER_STOP(sort_edges_by_start); TIMER_STOP(sort_edges_by_start);
std::cout << "ok, after " << TIMER_SEC(sort_edges_by_start) << "s" << std::endl; std::cout << "ok, after " << TIMER_SEC(sort_edges_by_start) << "s" << std::endl;
@ -217,21 +238,21 @@ void ExtractionContainers::PrepareEdges(lua_State *segment_state)
while (edge_iterator != all_edges_list_end && node_iterator != all_nodes_list_end) while (edge_iterator != all_edges_list_end && node_iterator != all_nodes_list_end)
{ {
if (edge_iterator->result.source < node_iterator->node_id) if (edge_iterator->result.osm_source_id < node_iterator->node_id)
{ {
SimpleLogger().Write(LogLevel::logWARNING) << "Found invalid node reference " << edge_iterator->result.source; SimpleLogger().Write(LogLevel::logWARNING) << "Found invalid node reference " << edge_iterator->result.source;
edge_iterator->result.source = SPECIAL_NODEID; edge_iterator->result.source = SPECIAL_NODEID;
++edge_iterator; ++edge_iterator;
continue; continue;
} }
if (edge_iterator->result.source > node_iterator->node_id) if (edge_iterator->result.osm_source_id > node_iterator->node_id)
{ {
node_iterator++; node_iterator++;
continue; continue;
} }
// remove loops // remove loops
if (edge_iterator->result.source == edge_iterator->result.target) if (edge_iterator->result.osm_source_id == edge_iterator->result.osm_target_id)
{ {
edge_iterator->result.source = SPECIAL_NODEID; edge_iterator->result.source = SPECIAL_NODEID;
edge_iterator->result.target = SPECIAL_NODEID; edge_iterator->result.target = SPECIAL_NODEID;
@ -239,7 +260,7 @@ void ExtractionContainers::PrepareEdges(lua_State *segment_state)
continue; continue;
} }
BOOST_ASSERT(edge_iterator->result.source == node_iterator->node_id); BOOST_ASSERT(edge_iterator->result.osm_source_id == node_iterator->node_id);
// assign new node id // assign new node id
auto id_iter = external_to_internal_node_id_map.find(node_iterator->node_id); auto id_iter = external_to_internal_node_id_map.find(node_iterator->node_id);
@ -250,13 +271,24 @@ void ExtractionContainers::PrepareEdges(lua_State *segment_state)
edge_iterator->source_coordinate.lon = node_iterator->lon; edge_iterator->source_coordinate.lon = node_iterator->lon;
++edge_iterator; ++edge_iterator;
} }
// Remove all remaining edges. They are invalid because there are no corresponding nodes for
// them. This happens when using osmosis with bbox or polygon to extract smaller areas.
auto markSourcesInvalid = [](InternalExtractorEdge &edge)
{
SimpleLogger().Write(LogLevel::logWARNING) << "Found invalid node reference "
<< edge.result.source;
edge.result.source = SPECIAL_NODEID;
edge.result.osm_source_id = SPECIAL_OSM_NODEID;
};
std::for_each(edge_iterator, all_edges_list_end, markSourcesInvalid);
TIMER_STOP(set_start_coords); TIMER_STOP(set_start_coords);
std::cout << "ok, after " << TIMER_SEC(set_start_coords) << "s" << std::endl; std::cout << "ok, after " << TIMER_SEC(set_start_coords) << "s" << std::endl;
// Sort Edges by target // Sort Edges by target
std::cout << "[extractor] Sorting edges by target ... " << std::flush; std::cout << "[extractor] Sorting edges by target ... " << std::flush;
TIMER_START(sort_edges_by_target); TIMER_START(sort_edges_by_target);
stxxl::sort(all_edges_list.begin(), all_edges_list.end(), CmpEdgeByTargetID(), stxxl::sort(all_edges_list.begin(), all_edges_list.end(), CmpEdgeByOSMTargetID(),
stxxl_memory); stxxl_memory);
TIMER_STOP(sort_edges_by_target); TIMER_STOP(sort_edges_by_target);
std::cout << "ok, after " << TIMER_SEC(sort_edges_by_target) << "s" << std::endl; std::cout << "ok, after " << TIMER_SEC(sort_edges_by_target) << "s" << std::endl;
@ -278,25 +310,25 @@ void ExtractionContainers::PrepareEdges(lua_State *segment_state)
continue; continue;
} }
if (edge_iterator->result.target < node_iterator->node_id) if (edge_iterator->result.osm_target_id < node_iterator->node_id)
{ {
SimpleLogger().Write(LogLevel::logWARNING) << "Found invalid node reference " << edge_iterator->result.target; SimpleLogger().Write(LogLevel::logWARNING) << "Found invalid node reference " << OSMNodeID_to_uint64_t(edge_iterator->result.osm_target_id);
edge_iterator->result.target = SPECIAL_NODEID; edge_iterator->result.target = SPECIAL_NODEID;
++edge_iterator; ++edge_iterator;
continue; continue;
} }
if (edge_iterator->result.target > node_iterator->node_id) if (edge_iterator->result.osm_target_id > node_iterator->node_id)
{ {
++node_iterator; ++node_iterator;
continue; continue;
} }
BOOST_ASSERT(edge_iterator->result.target == node_iterator->node_id); BOOST_ASSERT(edge_iterator->result.osm_target_id == node_iterator->node_id);
BOOST_ASSERT(edge_iterator->weight_data.speed >= 0); BOOST_ASSERT(edge_iterator->weight_data.speed >= 0);
BOOST_ASSERT(edge_iterator->source_coordinate.lat != std::numeric_limits<int>::min()); BOOST_ASSERT(edge_iterator->source_coordinate.lat != std::numeric_limits<int>::min());
BOOST_ASSERT(edge_iterator->source_coordinate.lon != std::numeric_limits<int>::min()); BOOST_ASSERT(edge_iterator->source_coordinate.lon != std::numeric_limits<int>::min());
const double distance = coordinate_calculation::euclidean_distance( const double distance = coordinate_calculation::great_circle_distance(
edge_iterator->source_coordinate.lat, edge_iterator->source_coordinate.lon, edge_iterator->source_coordinate.lat, edge_iterator->source_coordinate.lon,
node_iterator->lat, node_iterator->lon); node_iterator->lat, node_iterator->lon);
@ -347,13 +379,23 @@ void ExtractionContainers::PrepareEdges(lua_State *segment_state)
} }
++edge_iterator; ++edge_iterator;
} }
// Remove all remaining edges. They are invalid because there are no corresponding nodes for
// them. This happens when using osmosis with bbox or polygon to extract smaller areas.
auto markTargetsInvalid = [](InternalExtractorEdge &edge)
{
SimpleLogger().Write(LogLevel::logWARNING) << "Found invalid node reference "
<< edge.result.target;
edge.result.target = SPECIAL_NODEID;
};
std::for_each(edge_iterator, all_edges_list_end_, markTargetsInvalid);
TIMER_STOP(compute_weights); TIMER_STOP(compute_weights);
std::cout << "ok, after " << TIMER_SEC(compute_weights) << "s" << std::endl; std::cout << "ok, after " << TIMER_SEC(compute_weights) << "s" << std::endl;
// Sort edges by start. // Sort edges by start.
std::cout << "[extractor] Sorting edges by renumbered start ... " << std::flush; std::cout << "[extractor] Sorting edges by renumbered start ... " << std::flush;
TIMER_START(sort_edges_by_renumbered_start); TIMER_START(sort_edges_by_renumbered_start);
stxxl::sort(all_edges_list.begin(), all_edges_list.end(), CmpEdgeByStartThenTargetID(), stxxl_memory); stxxl::sort(all_edges_list.begin(), all_edges_list.end(), CmpEdgeByInternalStartThenInternalTargetID(), stxxl_memory);
TIMER_STOP(sort_edges_by_renumbered_start); TIMER_STOP(sort_edges_by_renumbered_start);
std::cout << "ok, after " << TIMER_SEC(sort_edges_by_renumbered_start) << "s" << std::endl; std::cout << "ok, after " << TIMER_SEC(sort_edges_by_renumbered_start) << "s" << std::endl;
@ -444,13 +486,14 @@ void ExtractionContainers::PrepareEdges(lua_State *segment_state)
void ExtractionContainers::WriteEdges(std::ofstream& file_out_stream) const void ExtractionContainers::WriteEdges(std::ofstream& file_out_stream) const
{ {
std::cout << "[extractor] Writing used egdes ... " << std::flush; std::cout << "[extractor] Writing used edges ... " << std::flush;
TIMER_START(write_edges); TIMER_START(write_edges);
// Traverse list of edges and nodes in parallel and set target coord // Traverse list of edges and nodes in parallel and set target coord
unsigned number_of_used_edges = 0; std::size_t used_edges_counter = 0;
unsigned used_edges_counter_buffer = 0;
auto start_position = file_out_stream.tellp(); auto start_position = file_out_stream.tellp();
file_out_stream.write((char *)&number_of_used_edges, sizeof(unsigned)); file_out_stream.write((char *)&used_edges_counter_buffer, sizeof(unsigned));
for (const auto& edge : all_edges_list) for (const auto& edge : all_edges_list)
{ {
@ -459,18 +502,29 @@ void ExtractionContainers::WriteEdges(std::ofstream& file_out_stream) const
continue; continue;
} }
file_out_stream.write((char*) &edge.result, sizeof(NodeBasedEdge)); // IMPORTANT: here, we're using slicing to only write the data from the base
number_of_used_edges++; // class of NodeBasedEdgeWithOSM
NodeBasedEdge tmp = edge.result;
file_out_stream.write((char*) &tmp, sizeof(NodeBasedEdge));
used_edges_counter++;
}
if (used_edges_counter > std::numeric_limits<unsigned>::max())
{
throw osrm::exception("There are too many edges, OSRM only supports 2^32");
} }
TIMER_STOP(write_edges); TIMER_STOP(write_edges);
std::cout << "ok, after " << TIMER_SEC(write_edges) << "s" << std::endl; std::cout << "ok, after " << TIMER_SEC(write_edges) << "s" << std::endl;
std::cout << "[extractor] setting number of edges ... " << std::flush; std::cout << "[extractor] setting number of edges ... " << std::flush;
used_edges_counter_buffer = boost::numeric_cast<unsigned>(used_edges_counter);
file_out_stream.seekp(start_position); file_out_stream.seekp(start_position);
file_out_stream.write((char *)&number_of_used_edges, sizeof(unsigned)); file_out_stream.write((char *)&used_edges_counter_buffer, sizeof(unsigned));
std::cout << "ok" << std::endl; std::cout << "ok" << std::endl;
SimpleLogger().Write() << "Processed " << number_of_used_edges << " edges"; SimpleLogger().Write() << "Processed " << used_edges_counter << " edges";
} }
void ExtractionContainers::WriteNodes(std::ofstream& file_out_stream) const void ExtractionContainers::WriteNodes(std::ofstream& file_out_stream) const
@ -569,13 +623,13 @@ void ExtractionContainers::PrepareRestrictions()
while (way_start_and_end_iterator != way_start_end_id_list_end && while (way_start_and_end_iterator != way_start_end_id_list_end &&
restrictions_iterator != restrictions_list_end) restrictions_iterator != restrictions_list_end)
{ {
if (way_start_and_end_iterator->way_id < restrictions_iterator->restriction.from.way) if (way_start_and_end_iterator->way_id < OSMWayID(restrictions_iterator->restriction.from.way))
{ {
++way_start_and_end_iterator; ++way_start_and_end_iterator;
continue; continue;
} }
if (way_start_and_end_iterator->way_id > restrictions_iterator->restriction.from.way) if (way_start_and_end_iterator->way_id > OSMWayID(restrictions_iterator->restriction.from.way))
{ {
SimpleLogger().Write(LogLevel::logDEBUG) << "Restriction references invalid way: " << restrictions_iterator->restriction.from.way; SimpleLogger().Write(LogLevel::logDEBUG) << "Restriction references invalid way: " << restrictions_iterator->restriction.from.way;
restrictions_iterator->restriction.from.node = SPECIAL_NODEID; restrictions_iterator->restriction.from.node = SPECIAL_NODEID;
@ -584,9 +638,9 @@ void ExtractionContainers::PrepareRestrictions()
} }
BOOST_ASSERT(way_start_and_end_iterator->way_id == BOOST_ASSERT(way_start_and_end_iterator->way_id ==
restrictions_iterator->restriction.from.way); OSMWayID(restrictions_iterator->restriction.from.way));
// we do not remap the via id yet, since we will need it for the to node as well // we do not remap the via id yet, since we will need it for the to node as well
const NodeID via_node_id = restrictions_iterator->restriction.via.node; const OSMNodeID via_node_id = OSMNodeID(restrictions_iterator->restriction.via.node);
// check if via is actually valid, if not invalidate // check if via is actually valid, if not invalidate
auto via_id_iter = external_to_internal_node_id_map.find(via_node_id); auto via_id_iter = external_to_internal_node_id_map.find(via_node_id);
@ -598,19 +652,19 @@ void ExtractionContainers::PrepareRestrictions()
continue; continue;
} }
if (way_start_and_end_iterator->first_segment_source_id == via_node_id) if (OSMNodeID(way_start_and_end_iterator->first_segment_source_id) == via_node_id)
{ {
// assign new from node id // assign new from node id
auto id_iter = external_to_internal_node_id_map.find( auto id_iter = external_to_internal_node_id_map.find(
way_start_and_end_iterator->first_segment_target_id); OSMNodeID(way_start_and_end_iterator->first_segment_target_id));
BOOST_ASSERT(id_iter != external_to_internal_node_id_map.end()); BOOST_ASSERT(id_iter != external_to_internal_node_id_map.end());
restrictions_iterator->restriction.from.node = id_iter->second; restrictions_iterator->restriction.from.node = id_iter->second;
} }
else if (way_start_and_end_iterator->last_segment_target_id == via_node_id) else if (OSMNodeID(way_start_and_end_iterator->last_segment_target_id) == via_node_id)
{ {
// assign new from node id // assign new from node id
auto id_iter = external_to_internal_node_id_map.find( auto id_iter = external_to_internal_node_id_map.find(
way_start_and_end_iterator->last_segment_source_id); OSMNodeID(way_start_and_end_iterator->last_segment_source_id));
BOOST_ASSERT(id_iter != external_to_internal_node_id_map.end()); BOOST_ASSERT(id_iter != external_to_internal_node_id_map.end());
restrictions_iterator->restriction.from.node = id_iter->second; restrictions_iterator->restriction.from.node = id_iter->second;
} }
@ -637,7 +691,7 @@ void ExtractionContainers::PrepareRestrictions()
while (way_start_and_end_iterator != way_start_end_id_list_end_ && while (way_start_and_end_iterator != way_start_end_id_list_end_ &&
restrictions_iterator != restrictions_list_end_) restrictions_iterator != restrictions_list_end_)
{ {
if (way_start_and_end_iterator->way_id < restrictions_iterator->restriction.to.way) if (way_start_and_end_iterator->way_id < OSMWayID(restrictions_iterator->restriction.to.way))
{ {
++way_start_and_end_iterator; ++way_start_and_end_iterator;
continue; continue;
@ -648,7 +702,7 @@ void ExtractionContainers::PrepareRestrictions()
++restrictions_iterator; ++restrictions_iterator;
continue; continue;
} }
if (way_start_and_end_iterator->way_id > restrictions_iterator->restriction.to.way) if (way_start_and_end_iterator->way_id > OSMWayID(restrictions_iterator->restriction.to.way))
{ {
SimpleLogger().Write(LogLevel::logDEBUG) << "Restriction references invalid way: " << restrictions_iterator->restriction.to.way; SimpleLogger().Write(LogLevel::logDEBUG) << "Restriction references invalid way: " << restrictions_iterator->restriction.to.way;
restrictions_iterator->restriction.to.way = SPECIAL_NODEID; restrictions_iterator->restriction.to.way = SPECIAL_NODEID;
@ -656,25 +710,25 @@ void ExtractionContainers::PrepareRestrictions()
continue; continue;
} }
BOOST_ASSERT(way_start_and_end_iterator->way_id == BOOST_ASSERT(way_start_and_end_iterator->way_id ==
restrictions_iterator->restriction.to.way); OSMWayID(restrictions_iterator->restriction.to.way));
const NodeID via_node_id = restrictions_iterator->restriction.via.node; const OSMNodeID via_node_id = OSMNodeID(restrictions_iterator->restriction.via.node);
// assign new via node id // assign new via node id
auto via_id_iter = external_to_internal_node_id_map.find(via_node_id); auto via_id_iter = external_to_internal_node_id_map.find(via_node_id);
BOOST_ASSERT(via_id_iter != external_to_internal_node_id_map.end()); BOOST_ASSERT(via_id_iter != external_to_internal_node_id_map.end());
restrictions_iterator->restriction.via.node = via_id_iter->second; restrictions_iterator->restriction.via.node = via_id_iter->second;
if (way_start_and_end_iterator->first_segment_source_id == via_node_id) if (OSMNodeID(way_start_and_end_iterator->first_segment_source_id) == via_node_id)
{ {
auto to_id_iter = external_to_internal_node_id_map.find( auto to_id_iter = external_to_internal_node_id_map.find(
way_start_and_end_iterator->first_segment_target_id); OSMNodeID(way_start_and_end_iterator->first_segment_target_id));
BOOST_ASSERT(to_id_iter != external_to_internal_node_id_map.end()); BOOST_ASSERT(to_id_iter != external_to_internal_node_id_map.end());
restrictions_iterator->restriction.to.node = to_id_iter->second; restrictions_iterator->restriction.to.node = to_id_iter->second;
} }
else if (way_start_and_end_iterator->last_segment_target_id == via_node_id) else if (OSMNodeID(way_start_and_end_iterator->last_segment_target_id) == via_node_id)
{ {
auto to_id_iter = external_to_internal_node_id_map.find( auto to_id_iter = external_to_internal_node_id_map.find(
way_start_and_end_iterator->last_segment_source_id); OSMNodeID(way_start_and_end_iterator->last_segment_source_id));
BOOST_ASSERT(to_id_iter != external_to_internal_node_id_map.end()); BOOST_ASSERT(to_id_iter != external_to_internal_node_id_map.end());
restrictions_iterator->restriction.to.node = to_id_iter->second; restrictions_iterator->restriction.to.node = to_id_iter->second;
} }

View File

@ -61,20 +61,20 @@ class ExtractionContainers
void WriteEdges(std::ofstream& file_out_stream) const; void WriteEdges(std::ofstream& file_out_stream) const;
void WriteNames(const std::string& names_file_name) const; void WriteNames(const std::string& names_file_name) const;
public: public:
using STXXLNodeIDVector = stxxl::vector<NodeID>; using STXXLNodeIDVector = stxxl::vector<OSMNodeID>;
using STXXLNodeVector = stxxl::vector<ExternalMemoryNode>; using STXXLNodeVector = stxxl::vector<ExternalMemoryNode>;
using STXXLEdgeVector = stxxl::vector<InternalExtractorEdge>; using STXXLEdgeVector = stxxl::vector<InternalExtractorEdge>;
using STXXLStringVector = stxxl::vector<std::string>;
using STXXLRestrictionsVector = stxxl::vector<InputRestrictionContainer>; using STXXLRestrictionsVector = stxxl::vector<InputRestrictionContainer>;
using STXXLWayIDStartEndVector = stxxl::vector<FirstAndLastSegmentOfWay>; using STXXLWayIDStartEndVector = stxxl::vector<FirstAndLastSegmentOfWay>;
STXXLNodeIDVector used_node_id_list; STXXLNodeIDVector used_node_id_list;
STXXLNodeVector all_nodes_list; STXXLNodeVector all_nodes_list;
STXXLEdgeVector all_edges_list; STXXLEdgeVector all_edges_list;
STXXLStringVector name_list; stxxl::vector<char> name_char_data;
stxxl::vector<unsigned> name_lengths;
STXXLRestrictionsVector restrictions_list; STXXLRestrictionsVector restrictions_list;
STXXLWayIDStartEndVector way_start_end_id_list; STXXLWayIDStartEndVector way_start_end_id_list;
std::unordered_map<NodeID, NodeID> external_to_internal_node_id_map; std::unordered_map<OSMNodeID, NodeID> external_to_internal_node_id_map;
unsigned max_internal_node_id; unsigned max_internal_node_id;
ExtractionContainers(); ExtractionContainers();

View File

@ -37,6 +37,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <boost/regex.hpp> #include <boost/regex.hpp>
#include <limits> #include <limits>
#include <string>
bool simple_duration_is_valid(const std::string &s) bool simple_duration_is_valid(const std::string &s)
{ {
@ -89,18 +90,18 @@ unsigned parseDuration(const std::string &s)
{ {
if (1 == result.size()) if (1 == result.size())
{ {
minutes = cast::string_to_int(result[0]); minutes = std::stoul(result[0]);
} }
if (2 == result.size()) if (2 == result.size())
{ {
minutes = cast::string_to_int(result[1]); minutes = std::stoul(result[1]);
hours = cast::string_to_int(result[0]); hours = std::stoul(result[0]);
} }
if (3 == result.size()) if (3 == result.size())
{ {
seconds = cast::string_to_int(result[2]); seconds = std::stoul(result[2]);
minutes = cast::string_to_int(result[1]); minutes = std::stoul(result[1]);
hours = cast::string_to_int(result[0]); hours = std::stoul(result[0]);
} }
return (3600 * hours + 60 * minutes + seconds); return (3600 * hours + 60 * minutes + seconds);
} }

View File

@ -50,6 +50,7 @@ struct ExtractionWay
backward_speed = -1; backward_speed = -1;
duration = -1; duration = -1;
roundabout = false; roundabout = false;
is_startpoint = true;
is_access_restricted = false; is_access_restricted = false;
name.clear(); name.clear();
forward_travel_mode = TRAVEL_MODE_DEFAULT; forward_travel_mode = TRAVEL_MODE_DEFAULT;
@ -120,6 +121,7 @@ struct ExtractionWay
std::string name; std::string name;
bool roundabout; bool roundabout;
bool is_access_restricted; bool is_access_restricted;
bool is_startpoint;
TravelMode forward_travel_mode : 4; TravelMode forward_travel_mode : 4;
TravelMode backward_travel_mode : 4; TravelMode backward_travel_mode : 4;
}; };

View File

@ -35,16 +35,25 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "scripting_environment.hpp" #include "scripting_environment.hpp"
#include "../data_structures/raster_source.hpp" #include "../data_structures/raster_source.hpp"
#include "../util/git_sha.hpp"
#include "../util/make_unique.hpp" #include "../util/make_unique.hpp"
#include "../util/simple_logger.hpp" #include "../util/simple_logger.hpp"
#include "../util/timing_util.hpp" #include "../util/timing_util.hpp"
#include "../util/lua_util.hpp" #include "../util/lua_util.hpp"
#include "../util/graph_loader.hpp"
#include "../typedefs.h" #include "../typedefs.h"
#include "../data_structures/static_graph.hpp"
#include "../data_structures/static_rtree.hpp"
#include "../data_structures/restriction_map.hpp"
#include "../data_structures/compressed_edge_container.hpp"
#include "../algorithms/tarjan_scc.hpp"
#include "../algorithms/crc32_processor.hpp"
#include <boost/filesystem.hpp> #include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp> #include <boost/filesystem/fstream.hpp>
#include <boost/optional/optional.hpp>
#include <luabind/luabind.hpp> #include <luabind/luabind.hpp>
@ -53,8 +62,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <tbb/parallel_for.h> #include <tbb/parallel_for.h>
#include <tbb/task_scheduler_init.h> #include <tbb/task_scheduler_init.h>
#include <variant/optional.hpp>
#include <cstdlib> #include <cstdlib>
#include <algorithm> #include <algorithm>
@ -81,7 +88,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* The result of this process are the following files: * The result of this process are the following files:
* .names : Names of all streets, stored as long consecutive string with prefix sum based index * .names : Names of all streets, stored as long consecutive string with prefix sum based index
* .osrm : Nodes and edges in a intermediate format that easy to digest for osrm-prepare * .osrm : Nodes and edges in a intermediate format that easy to digest for osrm-prepare
* .restrictions : Turn restrictions that are used my osrm-prepare to construct the edge-expanded graph * .restrictions : Turn restrictions that are used my osrm-prepare to construct the edge-expanded
* graph
* *
*/ */
int extractor::run() int extractor::run()
@ -151,8 +159,7 @@ int extractor::run()
// initialize vectors holding parsed objects // initialize vectors holding parsed objects
tbb::concurrent_vector<std::pair<std::size_t, ExtractionNode>> resulting_nodes; tbb::concurrent_vector<std::pair<std::size_t, ExtractionNode>> resulting_nodes;
tbb::concurrent_vector<std::pair<std::size_t, ExtractionWay>> resulting_ways; tbb::concurrent_vector<std::pair<std::size_t, ExtractionWay>> resulting_ways;
tbb::concurrent_vector<mapbox::util::optional<InputRestrictionContainer>> tbb::concurrent_vector<boost::optional<InputRestrictionContainer>> resulting_restrictions;
resulting_restrictions;
// setup restriction parser // setup restriction parser
const RestrictionParser restriction_parser(scripting_environment.get_lua_state()); const RestrictionParser restriction_parser(scripting_environment.get_lua_state());
@ -249,21 +256,383 @@ int extractor::run()
return 1; return 1;
} }
extraction_containers.PrepareData(config.output_file_name, extraction_containers.PrepareData(config.output_file_name, config.restriction_file_name,
config.restriction_file_name, config.names_file_name, segment_state);
config.names_file_name,
segment_state);
TIMER_STOP(extracting); TIMER_STOP(extracting);
SimpleLogger().Write() << "extraction finished after " << TIMER_SEC(extracting) << "s"; SimpleLogger().Write() << "extraction finished after " << TIMER_SEC(extracting) << "s";
SimpleLogger().Write() << "To prepare the data for routing, run: "
<< "./osrm-prepare " << config.output_file_name
<< std::endl;
} }
catch (std::exception &e) catch (const std::exception &e)
{ {
SimpleLogger().Write(logWARNING) << e.what(); SimpleLogger().Write(logWARNING) << e.what();
return 1; return 1;
} }
try
{
// Transform the node-based graph that OSM is based on into an edge-based graph
// that is better for routing. Every edge becomes a node, and every valid
// movement (e.g. turn from A->B, and B->A) becomes an edge
//
//
// // Create a new lua state
SimpleLogger().Write() << "Generating edge-expanded graph representation";
TIMER_START(expansion);
std::vector<EdgeBasedNode> node_based_edge_list;
DeallocatingVector<EdgeBasedEdge> edge_based_edge_list;
std::vector<bool> node_is_startpoint;
std::vector<QueryNode> internal_to_external_node_map;
auto graph_size =
BuildEdgeExpandedGraph(internal_to_external_node_map, node_based_edge_list,
node_is_startpoint, edge_based_edge_list);
auto number_of_node_based_nodes = graph_size.first;
auto max_edge_id = graph_size.second;
TIMER_STOP(expansion);
SimpleLogger().Write() << "building r-tree ...";
TIMER_START(rtree);
FindComponents(max_edge_id, edge_based_edge_list, node_based_edge_list);
BuildRTree(std::move(node_based_edge_list), std::move(node_is_startpoint),
internal_to_external_node_map);
TIMER_STOP(rtree);
SimpleLogger().Write() << "writing node map ...";
WriteNodeMapping(internal_to_external_node_map);
WriteEdgeBasedGraph(config.edge_graph_output_path, max_edge_id, edge_based_edge_list);
SimpleLogger().Write() << "Expansion : "
<< (number_of_node_based_nodes / TIMER_SEC(expansion))
<< " nodes/sec and " << ((max_edge_id + 1) / TIMER_SEC(expansion))
<< " edges/sec";
SimpleLogger().Write() << "To prepare the data for routing, run: "
<< "./osrm-prepare " << config.output_file_name << std::endl;
}
catch (const std::exception &e)
{
SimpleLogger().Write(logWARNING) << e.what();
return 1;
}
return 0; return 0;
} }
/**
\brief Setups scripting environment (lua-scripting)
Also initializes speed profile.
*/
void extractor::SetupScriptingEnvironment(lua_State *lua_state,
SpeedProfileProperties &speed_profile)
{
// open utility libraries string library;
luaL_openlibs(lua_state);
// adjust lua load path
luaAddScriptFolderToLoadPath(lua_state, config.profile_path.string().c_str());
// Now call our function in a lua script
if (0 != luaL_dofile(lua_state, config.profile_path.string().c_str()))
{
std::stringstream msg;
msg << lua_tostring(lua_state, -1) << " occured in scripting block";
throw osrm::exception(msg.str());
}
if (0 != luaL_dostring(lua_state, "return traffic_signal_penalty\n"))
{
std::stringstream msg;
msg << lua_tostring(lua_state, -1) << " occured in scripting block";
throw osrm::exception(msg.str());
}
speed_profile.traffic_signal_penalty = 10 * lua_tointeger(lua_state, -1);
SimpleLogger().Write(logDEBUG) << "traffic_signal_penalty: "
<< speed_profile.traffic_signal_penalty;
if (0 != luaL_dostring(lua_state, "return u_turn_penalty\n"))
{
std::stringstream msg;
msg << lua_tostring(lua_state, -1) << " occured in scripting block";
throw osrm::exception(msg.str());
}
speed_profile.u_turn_penalty = 10 * lua_tointeger(lua_state, -1);
speed_profile.has_turn_penalty_function = lua_function_exists(lua_state, "turn_function");
}
/**
 \brief Tags every edge-based node with the id and size of its strongly connected component.

 Builds a temporary undirected graph over the edge-based node ids (both travel
 directions of every input edge, plus an artificial link between the forward and
 reverse node of each segment), runs Tarjan's SCC on it, and writes the result
 into each node's `component` field. Components smaller than
 `config.small_component_size` are flagged as tiny.
 */
void extractor::FindComponents(unsigned max_edge_id,
                               const DeallocatingVector<EdgeBasedEdge> &input_edge_list,
                               std::vector<EdgeBasedNode> &input_nodes) const
{
    // empty payload: only connectivity matters for the component search
    struct UncontractedEdgeData
    {
    };
    struct InputEdge
    {
        unsigned source;
        unsigned target;
        UncontractedEdgeData data;
        bool operator<(const InputEdge &rhs) const
        {
            return source < rhs.source || (source == rhs.source && target < rhs.target);
        }
        bool operator==(const InputEdge &rhs) const
        {
            return source == rhs.source && target == rhs.target;
        }
    };
    using UncontractedGraph = StaticGraph<UncontractedEdgeData>;
    std::vector<InputEdge> edges;
    edges.reserve(input_edge_list.size() * 2);

    // insert each direction separately so the search effectively treats the
    // graph as undirected
    for (const auto &edge : input_edge_list)
    {
        BOOST_ASSERT_MSG(static_cast<unsigned int>(std::max(edge.weight, 1)) > 0,
                         "edge distance < 1");
        if (edge.forward)
        {
            edges.push_back({edge.source, edge.target, {}});
        }
        if (edge.backward)
        {
            edges.push_back({edge.target, edge.source, {}});
        }
    }

    // connect forward and backward nodes of each edge
    for (const auto &node : input_nodes)
    {
        if (node.reverse_edge_based_node_id != SPECIAL_NODEID)
        {
            edges.push_back({node.forward_edge_based_node_id, node.reverse_edge_based_node_id, {}});
            edges.push_back({node.reverse_edge_based_node_id, node.forward_edge_based_node_id, {}});
        }
    }

    // sort and deduplicate before handing the edge list to the static graph
    tbb::parallel_sort(edges.begin(), edges.end());
    auto new_end = std::unique(edges.begin(), edges.end());
    edges.resize(new_end - edges.begin());

    auto uncontractor_graph = std::make_shared<UncontractedGraph>(max_edge_id + 1, edges);

    TarjanSCC<UncontractedGraph> component_search(
        std::const_pointer_cast<const UncontractedGraph>(uncontractor_graph));
    component_search.run();

    for (auto &node : input_nodes)
    {
        auto forward_component = component_search.get_component_id(node.forward_edge_based_node_id);
        // forward and reverse node of the same segment were linked above, so they
        // must land in the same component.
        // NOTE(review): changed SPECIAL_EDGEID to SPECIAL_NODEID for consistency with
        // the guard in the linking loop above — reverse_edge_based_node_id is a node id.
        BOOST_ASSERT(node.reverse_edge_based_node_id == SPECIAL_NODEID ||
                     forward_component ==
                         component_search.get_component_id(node.reverse_edge_based_node_id));
        const unsigned component_size = component_search.get_component_size(forward_component);
        node.component.is_tiny = component_size < config.small_component_size;
        // component ids are shifted by one (presumably 0 means "unset" — confirm at usage sites)
        node.component.id = 1 + forward_component;
    }
}
/**
\brief Build load restrictions from .restriction file
*/
std::shared_ptr<RestrictionMap> extractor::LoadRestrictionMap()
{
boost::filesystem::ifstream input_stream(config.restriction_file_name,
std::ios::in | std::ios::binary);
std::vector<TurnRestriction> restriction_list;
loadRestrictionsFromFile(input_stream, restriction_list);
SimpleLogger().Write() << " - " << restriction_list.size() << " restrictions.";
return std::make_shared<RestrictionMap>(restriction_list);
}
/**
\brief Load node based graph from .osrm file
*/
std::shared_ptr<NodeBasedDynamicGraph>
extractor::LoadNodeBasedGraph(std::unordered_set<NodeID> &barrier_nodes,
std::unordered_set<NodeID> &traffic_lights,
std::vector<QueryNode> &internal_to_external_node_map)
{
std::vector<NodeBasedEdge> edge_list;
boost::filesystem::ifstream input_stream(config.output_file_name,
std::ios::in | std::ios::binary);
std::vector<NodeID> barrier_list;
std::vector<NodeID> traffic_light_list;
NodeID number_of_node_based_nodes = loadNodesFromFile(
input_stream, barrier_list, traffic_light_list, internal_to_external_node_map);
SimpleLogger().Write() << " - " << barrier_list.size() << " bollard nodes, "
<< traffic_light_list.size() << " traffic lights";
// insert into unordered sets for fast lookup
barrier_nodes.insert(barrier_list.begin(), barrier_list.end());
traffic_lights.insert(traffic_light_list.begin(), traffic_light_list.end());
barrier_list.clear();
barrier_list.shrink_to_fit();
traffic_light_list.clear();
traffic_light_list.shrink_to_fit();
loadEdgesFromFile(input_stream, edge_list);
if (edge_list.empty())
{
SimpleLogger().Write(logWARNING) << "The input data is empty, exiting.";
return std::shared_ptr<NodeBasedDynamicGraph>();
}
return NodeBasedDynamicGraphFromEdges(number_of_node_based_nodes, edge_list);
}
/**
\brief Building an edge-expanded graph from node-based input and turn restrictions
*/
std::pair<std::size_t, std::size_t>
extractor::BuildEdgeExpandedGraph(std::vector<QueryNode> &internal_to_external_node_map,
std::vector<EdgeBasedNode> &node_based_edge_list,
std::vector<bool> &node_is_startpoint,
DeallocatingVector<EdgeBasedEdge> &edge_based_edge_list)
{
lua_State *lua_state = luaL_newstate();
luabind::open(lua_state);
SpeedProfileProperties speed_profile;
SetupScriptingEnvironment(lua_state, speed_profile);
std::unordered_set<NodeID> barrier_nodes;
std::unordered_set<NodeID> traffic_lights;
auto restriction_map = LoadRestrictionMap();
auto node_based_graph =
LoadNodeBasedGraph(barrier_nodes, traffic_lights, internal_to_external_node_map);
CompressedEdgeContainer compressed_edge_container;
GraphCompressor graph_compressor(speed_profile);
graph_compressor.Compress(barrier_nodes, traffic_lights, *restriction_map, *node_based_graph,
compressed_edge_container);
EdgeBasedGraphFactory edge_based_graph_factory(
node_based_graph, compressed_edge_container, barrier_nodes, traffic_lights,
std::const_pointer_cast<RestrictionMap const>(restriction_map),
internal_to_external_node_map, speed_profile);
compressed_edge_container.SerializeInternalVector(config.geometry_output_path);
edge_based_graph_factory.Run(config.edge_output_path, lua_state,
config.edge_segment_lookup_path, config.edge_penalty_path,
config.generate_edge_lookup
#ifdef DEBUG_GEOMETRY
,
config.debug_turns_path
#endif
);
lua_close(lua_state);
edge_based_graph_factory.GetEdgeBasedEdges(edge_based_edge_list);
edge_based_graph_factory.GetEdgeBasedNodes(node_based_edge_list);
edge_based_graph_factory.GetStartPointMarkers(node_is_startpoint);
auto max_edge_id = edge_based_graph_factory.GetHighestEdgeID();
const std::size_t number_of_node_based_nodes = node_based_graph->GetNumberOfNodes();
return std::make_pair(number_of_node_based_nodes, max_edge_id);
}
/**
 \brief Writing info on original (node-based) nodes
 */
void extractor::WriteNodeMapping(const std::vector<QueryNode> &internal_to_external_node_map)
{
    boost::filesystem::ofstream node_stream(config.node_output_path, std::ios::binary);

    // on-disk format: unsigned element count, then the raw QueryNode array
    const unsigned size_of_mapping = internal_to_external_node_map.size();
    node_stream.write(reinterpret_cast<const char *>(&size_of_mapping), sizeof(unsigned));
    if (size_of_mapping > 0)
    {
        node_stream.write(reinterpret_cast<const char *>(internal_to_external_node_map.data()),
                          size_of_mapping * sizeof(QueryNode));
    }
    node_stream.close();
}
/**
\brief Building rtree-based nearest-neighbor data structure
Saves tree into '.ramIndex' and leaves into '.fileIndex'.
*/
void extractor::BuildRTree(std::vector<EdgeBasedNode> node_based_edge_list,
std::vector<bool> node_is_startpoint,
const std::vector<QueryNode> &internal_to_external_node_map)
{
SimpleLogger().Write() << "constructing r-tree of " << node_based_edge_list.size()
<< " edge elements build on-top of "
<< internal_to_external_node_map.size() << " coordinates";
BOOST_ASSERT(node_is_startpoint.size() == node_based_edge_list.size());
// Filter node based edges based on startpoint
auto out_iter = node_based_edge_list.begin();
auto in_iter = node_based_edge_list.begin();
for (auto index : osrm::irange<std::size_t>(0, node_is_startpoint.size()))
{
BOOST_ASSERT(in_iter != node_based_edge_list.end());
if (node_is_startpoint[index])
{
*out_iter = *in_iter;
out_iter++;
}
in_iter++;
}
auto new_size = out_iter - node_based_edge_list.begin();
node_based_edge_list.resize(new_size);
TIMER_START(construction);
StaticRTree<EdgeBasedNode>(node_based_edge_list, config.rtree_nodes_output_path,
config.rtree_leafs_output_path, internal_to_external_node_map);
TIMER_STOP(construction);
SimpleLogger().Write() << "finished r-tree construction in " << TIMER_SEC(construction)
<< " seconds";
}
/**
 \brief Writes the edge-expanded graph to disk.

 On-disk layout: FingerPrint, edge count (size_t), max edge id (size_t),
 then one raw EdgeBasedEdge record per edge.
 */
void extractor::WriteEdgeBasedGraph(std::string const &output_file_filename,
                                    size_t const max_edge_id,
                                    DeallocatingVector<EdgeBasedEdge> const &edge_based_edge_list)
{
    std::ofstream file_out_stream;
    file_out_stream.open(output_file_filename.c_str(), std::ios::binary);

    // the fingerprint lets readers verify format/version compatibility
    const FingerPrint fingerprint = FingerPrint::GetValid();
    file_out_stream.write(reinterpret_cast<const char *>(&fingerprint), sizeof(FingerPrint));

    // fixed typo in the progress message ("egdes" -> "edges")
    std::cout << "[extractor] Writing edge-based-graph edges ... " << std::flush;
    TIMER_START(write_edges);

    const size_t number_of_used_edges = edge_based_edge_list.size();
    file_out_stream.write(reinterpret_cast<const char *>(&number_of_used_edges), sizeof(size_t));
    file_out_stream.write(reinterpret_cast<const char *>(&max_edge_id), sizeof(size_t));

    for (const auto &edge : edge_based_edge_list)
    {
        file_out_stream.write(reinterpret_cast<const char *>(&edge), sizeof(EdgeBasedEdge));
    }

    TIMER_STOP(write_edges);
    std::cout << "ok, after " << TIMER_SEC(write_edges) << "s" << std::endl;
    SimpleLogger().Write() << "Processed " << number_of_used_edges << " edges";
    file_out_stream.close();
}

View File

@ -29,13 +29,38 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define EXTRACTOR_HPP #define EXTRACTOR_HPP
#include "extractor_options.hpp" #include "extractor_options.hpp"
#include "edge_based_graph_factory.hpp"
#include "../algorithms/graph_compressor.hpp"
class extractor class extractor
{ {
public: public:
extractor(ExtractorConfig extractor_config) : config(std::move(extractor_config)) {} extractor(ExtractorConfig extractor_config) : config(std::move(extractor_config)) {}
int run(); int run();
private:
ExtractorConfig config; private:
ExtractorConfig config;
void SetupScriptingEnvironment(lua_State *myLuaState, SpeedProfileProperties &speed_profile);
std::pair<std::size_t, std::size_t>
BuildEdgeExpandedGraph(std::vector<QueryNode> &internal_to_external_node_map,
std::vector<EdgeBasedNode> &node_based_edge_list,
std::vector<bool> &node_is_startpoint,
DeallocatingVector<EdgeBasedEdge> &edge_based_edge_list);
void WriteNodeMapping(const std::vector<QueryNode> &internal_to_external_node_map);
void FindComponents(unsigned max_edge_id,
const DeallocatingVector<EdgeBasedEdge> &edges,
std::vector<EdgeBasedNode> &nodes) const;
void BuildRTree(std::vector<EdgeBasedNode> node_based_edge_list,
std::vector<bool> node_is_startpoint,
const std::vector<QueryNode> &internal_to_external_node_map);
std::shared_ptr<RestrictionMap> LoadRestrictionMap();
std::shared_ptr<NodeBasedDynamicGraph>
LoadNodeBasedGraph(std::unordered_set<NodeID> &barrier_nodes,
std::unordered_set<NodeID> &traffic_lights,
std::vector<QueryNode> &internal_to_external_node_map);
void WriteEdgeBasedGraph(std::string const &output_file_filename,
size_t const max_edge_id,
DeallocatingVector<EdgeBasedEdge> const &edge_based_edge_list);
}; };
#endif /* EXTRACTOR_HPP */ #endif /* EXTRACTOR_HPP */

View File

@ -35,6 +35,10 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../util/container.hpp" #include "../util/container.hpp"
#include "../util/simple_logger.hpp" #include "../util/simple_logger.hpp"
#include <boost/optional/optional.hpp>
#include <osmium/osm.hpp>
#include <osrm/coordinate.hpp> #include <osrm/coordinate.hpp>
#include <limits> #include <limits>
@ -59,13 +63,13 @@ void ExtractorCallbacks::ProcessNode(const osmium::Node &input_node,
external_memory.all_nodes_list.push_back( external_memory.all_nodes_list.push_back(
{static_cast<int>(input_node.location().lat() * COORDINATE_PRECISION), {static_cast<int>(input_node.location().lat() * COORDINATE_PRECISION),
static_cast<int>(input_node.location().lon() * COORDINATE_PRECISION), static_cast<int>(input_node.location().lon() * COORDINATE_PRECISION),
static_cast<NodeID>(input_node.id()), OSMNodeID(input_node.id()),
result_node.barrier, result_node.barrier,
result_node.traffic_lights}); result_node.traffic_lights});
} }
void ExtractorCallbacks::ProcessRestriction( void ExtractorCallbacks::ProcessRestriction(
const mapbox::util::optional<InputRestrictionContainer> &restriction) const boost::optional<InputRestrictionContainer> &restriction)
{ {
if (restriction) if (restriction)
{ {
@ -140,8 +144,8 @@ void ExtractorCallbacks::ProcessWay(const osmium::Way &input_way, const Extracti
} }
} }
if (forward_weight_data.type == InternalExtractorEdge::WeightType::INVALID if (forward_weight_data.type == InternalExtractorEdge::WeightType::INVALID &&
&& backward_weight_data.type == InternalExtractorEdge::WeightType::INVALID) backward_weight_data.type == InternalExtractorEdge::WeightType::INVALID)
{ {
SimpleLogger().Write(logDEBUG) << "found way with bogus speed, id: " << input_way.id(); SimpleLogger().Write(logDEBUG) << "found way with bogus speed, id: " << input_way.id();
return; return;
@ -149,10 +153,12 @@ void ExtractorCallbacks::ProcessWay(const osmium::Way &input_way, const Extracti
// Get the unique identifier for the street name // Get the unique identifier for the street name
const auto &string_map_iterator = string_map.find(parsed_way.name); const auto &string_map_iterator = string_map.find(parsed_way.name);
unsigned name_id = external_memory.name_list.size(); unsigned name_id = external_memory.name_lengths.size();
if (string_map.end() == string_map_iterator) if (string_map.end() == string_map_iterator)
{ {
external_memory.name_list.push_back(parsed_way.name); auto name_length = std::min<unsigned>(255u, parsed_way.name.size());
std::copy(parsed_way.name.c_str(), parsed_way.name.c_str() + name_length, std::back_inserter(external_memory.name_char_data));
external_memory.name_lengths.push_back(name_length);
string_map.insert(std::make_pair(parsed_way.name, name_id)); string_map.insert(std::make_pair(parsed_way.name, name_id));
} }
else else
@ -169,7 +175,10 @@ void ExtractorCallbacks::ProcessWay(const osmium::Way &input_way, const Extracti
std::transform(input_way.nodes().begin(), input_way.nodes().end(), std::transform(input_way.nodes().begin(), input_way.nodes().end(),
std::back_inserter(external_memory.used_node_id_list), std::back_inserter(external_memory.used_node_id_list),
[](const osmium::NodeRef& ref) { return ref.ref(); }); [](const osmium::NodeRef &ref)
{
return OSMNodeID(ref.ref());
});
const bool is_opposite_way = TRAVEL_MODE_INACCESSIBLE == parsed_way.forward_travel_mode; const bool is_opposite_way = TRAVEL_MODE_INACCESSIBLE == parsed_way.forward_travel_mode;
@ -182,53 +191,51 @@ void ExtractorCallbacks::ProcessWay(const osmium::Way &input_way, const Extracti
[&](const osmium::NodeRef &first_node, const osmium::NodeRef &last_node) [&](const osmium::NodeRef &first_node, const osmium::NodeRef &last_node)
{ {
external_memory.all_edges_list.push_back(InternalExtractorEdge( external_memory.all_edges_list.push_back(InternalExtractorEdge(
first_node.ref(), last_node.ref(), name_id, backward_weight_data, OSMNodeID(first_node.ref()), OSMNodeID(last_node.ref()), name_id,
true, false, parsed_way.roundabout, parsed_way.is_access_restricted, backward_weight_data, true, false, parsed_way.roundabout,
parsed_way.is_access_restricted, parsed_way.is_startpoint,
parsed_way.backward_travel_mode, false)); parsed_way.backward_travel_mode, false));
}); });
external_memory.way_start_end_id_list.push_back( external_memory.way_start_end_id_list.push_back(
{ {OSMWayID(input_way.id()),
static_cast<EdgeID>(input_way.id()), OSMNodeID(input_way.nodes().back().ref()),
static_cast<NodeID>(input_way.nodes().back().ref()), OSMNodeID(input_way.nodes()[input_way.nodes().size() - 2].ref()),
static_cast<NodeID>(input_way.nodes()[input_way.nodes().size() - 2].ref()), OSMNodeID(input_way.nodes()[1].ref()),
static_cast<NodeID>(input_way.nodes()[1].ref()), OSMNodeID(input_way.nodes()[0].ref())});
static_cast<NodeID>(input_way.nodes()[0].ref())
}
);
} }
else else
{ {
const bool forward_only = split_edge || TRAVEL_MODE_INACCESSIBLE == parsed_way.backward_travel_mode; const bool forward_only =
split_edge || TRAVEL_MODE_INACCESSIBLE == parsed_way.backward_travel_mode;
osrm::for_each_pair(input_way.nodes().cbegin(), input_way.nodes().cend(), osrm::for_each_pair(input_way.nodes().cbegin(), input_way.nodes().cend(),
[&](const osmium::NodeRef &first_node, const osmium::NodeRef &last_node) [&](const osmium::NodeRef &first_node, const osmium::NodeRef &last_node)
{ {
external_memory.all_edges_list.push_back(InternalExtractorEdge( external_memory.all_edges_list.push_back(InternalExtractorEdge(
first_node.ref(), last_node.ref(), name_id, forward_weight_data, OSMNodeID(first_node.ref()), OSMNodeID(last_node.ref()), name_id, forward_weight_data,
true, !forward_only, parsed_way.roundabout, parsed_way.is_access_restricted, true, !forward_only, parsed_way.roundabout,
parsed_way.forward_travel_mode, split_edge)); parsed_way.is_access_restricted, parsed_way.is_startpoint, parsed_way.forward_travel_mode,
split_edge));
}); });
if (split_edge) if (split_edge)
{ {
BOOST_ASSERT(parsed_way.backward_travel_mode != TRAVEL_MODE_INACCESSIBLE); BOOST_ASSERT(parsed_way.backward_travel_mode != TRAVEL_MODE_INACCESSIBLE);
osrm::for_each_pair(input_way.nodes().cbegin(), input_way.nodes().cend(), osrm::for_each_pair(
[&](const osmium::NodeRef &first_node, const osmium::NodeRef &last_node) input_way.nodes().cbegin(), input_way.nodes().cend(),
{ [&](const osmium::NodeRef &first_node, const osmium::NodeRef &last_node)
external_memory.all_edges_list.push_back(InternalExtractorEdge( {
first_node.ref(), last_node.ref(), name_id, backward_weight_data, external_memory.all_edges_list.push_back(InternalExtractorEdge(
false, true, parsed_way.roundabout, parsed_way.is_access_restricted, OSMNodeID(first_node.ref()), OSMNodeID(last_node.ref()), name_id, backward_weight_data, false,
parsed_way.backward_travel_mode, true)); true, parsed_way.roundabout, parsed_way.is_access_restricted,
}); parsed_way.is_startpoint, parsed_way.backward_travel_mode, true));
});
} }
external_memory.way_start_end_id_list.push_back( external_memory.way_start_end_id_list.push_back(
{ {OSMWayID(input_way.id()),
static_cast<EdgeID>(input_way.id()), OSMNodeID(input_way.nodes().back().ref()),
static_cast<NodeID>(input_way.nodes().back().ref()), OSMNodeID(input_way.nodes()[input_way.nodes().size() - 2].ref()),
static_cast<NodeID>(input_way.nodes()[input_way.nodes().size() - 2].ref()), OSMNodeID(input_way.nodes()[1].ref()),
static_cast<NodeID>(input_way.nodes()[1].ref()), OSMNodeID(input_way.nodes()[0].ref())});
static_cast<NodeID>(input_way.nodes()[0].ref())
}
);
} }
} }

View File

@ -28,12 +28,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef EXTRACTOR_CALLBACKS_HPP #ifndef EXTRACTOR_CALLBACKS_HPP
#define EXTRACTOR_CALLBACKS_HPP #define EXTRACTOR_CALLBACKS_HPP
#include "extraction_way.hpp"
#include "../typedefs.h" #include "../typedefs.h"
#include <boost/optional/optional_fwd.hpp>
#include <osmium/osm.hpp>
#include <variant/optional.hpp>
#include <string> #include <string>
#include <unordered_map> #include <unordered_map>
@ -42,6 +38,12 @@ struct ExternalMemoryNode;
class ExtractionContainers; class ExtractionContainers;
struct InputRestrictionContainer; struct InputRestrictionContainer;
struct ExtractionNode; struct ExtractionNode;
struct ExtractionWay;
namespace osmium
{
class Node;
class Way;
}
/** /**
* This class is uses by the extractor with the results of the * This class is uses by the extractor with the results of the
@ -66,7 +68,7 @@ class ExtractorCallbacks
void ProcessNode(const osmium::Node &current_node, const ExtractionNode &result_node); void ProcessNode(const osmium::Node &current_node, const ExtractionNode &result_node);
// warning: caller needs to take care of synchronization! // warning: caller needs to take care of synchronization!
void ProcessRestriction(const mapbox::util::optional<InputRestrictionContainer> &restriction); void ProcessRestriction(const boost::optional<InputRestrictionContainer> &restriction);
// warning: caller needs to take care of synchronization! // warning: caller needs to take care of synchronization!
void ProcessWay(const osmium::Way &current_way, const ExtractionWay &result_way); void ProcessWay(const osmium::Way &current_way, const ExtractionWay &result_way);

View File

@ -27,7 +27,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "extractor_options.hpp" #include "extractor_options.hpp"
#include "../util/git_sha.hpp" #include "util/version.hpp"
#include "../util/ini_file.hpp" #include "../util/ini_file.hpp"
#include "../util/simple_logger.hpp" #include "../util/simple_logger.hpp"
@ -42,6 +42,12 @@ ExtractorOptions::ParseArguments(int argc, char *argv[], ExtractorConfig &extrac
// declare a group of options that will be allowed only on command line // declare a group of options that will be allowed only on command line
boost::program_options::options_description generic_options("Options"); boost::program_options::options_description generic_options("Options");
generic_options.add_options()("version,v", "Show version")("help,h", "Show this help message")( generic_options.add_options()("version,v", "Show version")("help,h", "Show this help message")(
/*
* TODO: re-enable this
"restrictions,r",
boost::program_options::value<boost::filesystem::path>(&extractor_config.restrictions_path),
"Restrictions file in .osrm.restrictions format")(
*/
"config,c", boost::program_options::value<boost::filesystem::path>( "config,c", boost::program_options::value<boost::filesystem::path>(
&extractor_config.config_file_path)->default_value("extractor.ini"), &extractor_config.config_file_path)->default_value("extractor.ini"),
"Path to a configuration file."); "Path to a configuration file.");
@ -55,7 +61,20 @@ ExtractorOptions::ParseArguments(int argc, char *argv[], ExtractorConfig &extrac
"threads,t", "threads,t",
boost::program_options::value<unsigned int>(&extractor_config.requested_num_threads) boost::program_options::value<unsigned int>(&extractor_config.requested_num_threads)
->default_value(tbb::task_scheduler_init::default_num_threads()), ->default_value(tbb::task_scheduler_init::default_num_threads()),
"Number of threads to use"); "Number of threads to use")(
"generate-edge-lookup",boost::program_options::value<bool>(
&extractor_config.generate_edge_lookup)->implicit_value(true)->default_value(false),
"Generate a lookup table for internal edge-expanded-edge IDs to OSM node pairs")(
"small-component-size",
boost::program_options::value<unsigned int>(&extractor_config.small_component_size)
->default_value(1000),
"Number of nodes required before a strongly-connected-componennt is considered big (affects nearest neighbor snapping)");
#ifdef DEBUG_GEOMETRY
config_options.add_options()("debug-turns",
boost::program_options::value<std::string>(&extractor_config.debug_turns_path),
"Write out GeoJSON with turn penalty data");
#endif // DEBUG_GEOMETRY
// hidden options, will be allowed both on command line and in config file, but will not be // hidden options, will be allowed both on command line and in config file, but will not be
// shown to the user // shown to the user
@ -64,6 +83,7 @@ ExtractorOptions::ParseArguments(int argc, char *argv[], ExtractorConfig &extrac
&extractor_config.input_path), &extractor_config.input_path),
"Input file in .osm, .osm.bz2 or .osm.pbf format"); "Input file in .osm, .osm.bz2 or .osm.pbf format");
// positional option // positional option
boost::program_options::positional_options_description positional_options; boost::program_options::positional_options_description positional_options;
positional_options.add("input", 1); positional_options.add("input", 1);
@ -90,7 +110,7 @@ ExtractorOptions::ParseArguments(int argc, char *argv[], ExtractorConfig &extrac
option_variables); option_variables);
if (option_variables.count("version")) if (option_variables.count("version"))
{ {
SimpleLogger().Write() << g_GIT_DESCRIPTION; SimpleLogger().Write() << OSRM_VERSION;
return return_code::exit; return return_code::exit;
} }
@ -137,6 +157,14 @@ void ExtractorOptions::GenerateOutputFilesNames(ExtractorConfig &extractor_confi
extractor_config.restriction_file_name = input_path.string(); extractor_config.restriction_file_name = input_path.string();
extractor_config.names_file_name = input_path.string(); extractor_config.names_file_name = input_path.string();
extractor_config.timestamp_file_name = input_path.string(); extractor_config.timestamp_file_name = input_path.string();
extractor_config.geometry_output_path = input_path.string();
extractor_config.edge_output_path = input_path.string();
extractor_config.edge_graph_output_path = input_path.string();
extractor_config.node_output_path = input_path.string();
extractor_config.rtree_nodes_output_path = input_path.string();
extractor_config.rtree_leafs_output_path = input_path.string();
extractor_config.edge_segment_lookup_path = input_path.string();
extractor_config.edge_penalty_path = input_path.string();
std::string::size_type pos = extractor_config.output_file_name.find(".osm.bz2"); std::string::size_type pos = extractor_config.output_file_name.find(".osm.bz2");
if (pos == std::string::npos) if (pos == std::string::npos)
{ {
@ -159,6 +187,14 @@ void ExtractorOptions::GenerateOutputFilesNames(ExtractorConfig &extractor_confi
extractor_config.restriction_file_name.append(".osrm.restrictions"); extractor_config.restriction_file_name.append(".osrm.restrictions");
extractor_config.names_file_name.append(".osrm.names"); extractor_config.names_file_name.append(".osrm.names");
extractor_config.timestamp_file_name.append(".osrm.timestamp"); extractor_config.timestamp_file_name.append(".osrm.timestamp");
extractor_config.geometry_output_path.append(".osrm.geometry");
extractor_config.node_output_path.append(".osrm.nodes");
extractor_config.edge_output_path.append(".osrm.edges");
extractor_config.edge_graph_output_path.append(".osrm.ebg");
extractor_config.rtree_nodes_output_path.append(".osrm.ramIndex");
extractor_config.rtree_leafs_output_path.append(".osrm.fileIndex");
extractor_config.edge_segment_lookup_path.append(".osrm.edge_segment_lookup");
extractor_config.edge_penalty_path.append(".osrm.edge_penalties");
} }
else else
{ {
@ -166,6 +202,14 @@ void ExtractorOptions::GenerateOutputFilesNames(ExtractorConfig &extractor_confi
extractor_config.restriction_file_name.replace(pos, 5, ".osrm.restrictions"); extractor_config.restriction_file_name.replace(pos, 5, ".osrm.restrictions");
extractor_config.names_file_name.replace(pos, 5, ".osrm.names"); extractor_config.names_file_name.replace(pos, 5, ".osrm.names");
extractor_config.timestamp_file_name.replace(pos, 5, ".osrm.timestamp"); extractor_config.timestamp_file_name.replace(pos, 5, ".osrm.timestamp");
extractor_config.geometry_output_path.replace(pos, 5, ".osrm.geometry");
extractor_config.node_output_path.replace(pos, 5, ".osrm.nodes");
extractor_config.edge_output_path.replace(pos, 5, ".osrm.edges");
extractor_config.edge_graph_output_path.replace(pos, 5, ".osrm.ebg");
extractor_config.rtree_nodes_output_path.replace(pos, 5, ".osrm.ramIndex");
extractor_config.rtree_leafs_output_path.replace(pos, 5, ".osrm.fileIndex");
extractor_config.edge_segment_lookup_path.replace(pos,5, ".osrm.edge_segment_lookup");
extractor_config.edge_penalty_path.replace(pos,5, ".osrm.edge_penalties");
} }
} }
else else
@ -174,5 +218,13 @@ void ExtractorOptions::GenerateOutputFilesNames(ExtractorConfig &extractor_confi
extractor_config.restriction_file_name.replace(pos, 8, ".osrm.restrictions"); extractor_config.restriction_file_name.replace(pos, 8, ".osrm.restrictions");
extractor_config.names_file_name.replace(pos, 8, ".osrm.names"); extractor_config.names_file_name.replace(pos, 8, ".osrm.names");
extractor_config.timestamp_file_name.replace(pos, 8, ".osrm.timestamp"); extractor_config.timestamp_file_name.replace(pos, 8, ".osrm.timestamp");
extractor_config.geometry_output_path.replace(pos, 8, ".osrm.geometry");
extractor_config.node_output_path.replace(pos, 8, ".osrm.nodes");
extractor_config.edge_output_path.replace(pos, 8, ".osrm.edges");
extractor_config.edge_graph_output_path.replace(pos, 8, ".osrm.ebg");
extractor_config.rtree_nodes_output_path.replace(pos, 8, ".osrm.ramIndex");
extractor_config.rtree_leafs_output_path.replace(pos, 8, ".osrm.fileIndex");
extractor_config.edge_segment_lookup_path.replace(pos,8, ".osrm.edge_segment_lookup");
extractor_config.edge_penalty_path.replace(pos,8, ".osrm.edge_penalties");
} }
} }

View File

@ -50,8 +50,22 @@ struct ExtractorConfig
std::string restriction_file_name; std::string restriction_file_name;
std::string names_file_name; std::string names_file_name;
std::string timestamp_file_name; std::string timestamp_file_name;
std::string geometry_output_path;
std::string edge_output_path;
std::string edge_graph_output_path;
std::string node_output_path;
std::string rtree_nodes_output_path;
std::string rtree_leafs_output_path;
unsigned requested_num_threads; unsigned requested_num_threads;
unsigned small_component_size;
bool generate_edge_lookup;
std::string edge_penalty_path;
std::string edge_segment_lookup_path;
#ifdef DEBUG_GEOMETRY
std::string debug_turns_path;
#endif
}; };
struct ExtractorOptions struct ExtractorOptions

View File

@ -36,21 +36,22 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
struct FirstAndLastSegmentOfWay struct FirstAndLastSegmentOfWay
{ {
EdgeID way_id; OSMWayID way_id;
NodeID first_segment_source_id; OSMNodeID first_segment_source_id;
NodeID first_segment_target_id; OSMNodeID first_segment_target_id;
NodeID last_segment_source_id; OSMNodeID last_segment_source_id;
NodeID last_segment_target_id; OSMNodeID last_segment_target_id;
FirstAndLastSegmentOfWay() FirstAndLastSegmentOfWay()
: way_id(std::numeric_limits<EdgeID>::max()), : way_id(SPECIAL_OSM_WAYID),
first_segment_source_id(std::numeric_limits<NodeID>::max()), first_segment_source_id(SPECIAL_OSM_NODEID),
first_segment_target_id(std::numeric_limits<NodeID>::max()), first_segment_target_id(SPECIAL_OSM_NODEID),
last_segment_source_id(std::numeric_limits<NodeID>::max()), last_segment_source_id(SPECIAL_OSM_NODEID),
last_segment_target_id(std::numeric_limits<NodeID>::max()) last_segment_target_id(SPECIAL_OSM_NODEID)
{ {
} }
FirstAndLastSegmentOfWay(EdgeID w, NodeID fs, NodeID ft, NodeID ls, NodeID lt) FirstAndLastSegmentOfWay(OSMWayID w, OSMNodeID fs, OSMNodeID ft, OSMNodeID ls, OSMNodeID lt)
: way_id(w), first_segment_source_id(fs), first_segment_target_id(ft), : way_id(w), first_segment_source_id(fs), first_segment_target_id(ft),
last_segment_source_id(ls), last_segment_target_id(lt) last_segment_source_id(ls), last_segment_target_id(lt)
{ {
@ -58,19 +59,19 @@ struct FirstAndLastSegmentOfWay
static FirstAndLastSegmentOfWay min_value() static FirstAndLastSegmentOfWay min_value()
{ {
return {std::numeric_limits<EdgeID>::min(), return {MIN_OSM_WAYID,
std::numeric_limits<NodeID>::min(), MIN_OSM_NODEID,
std::numeric_limits<NodeID>::min(), MIN_OSM_NODEID,
std::numeric_limits<NodeID>::min(), MIN_OSM_NODEID,
std::numeric_limits<NodeID>::min()}; MIN_OSM_NODEID};
} }
static FirstAndLastSegmentOfWay max_value() static FirstAndLastSegmentOfWay max_value()
{ {
return {std::numeric_limits<EdgeID>::max(), return {MAX_OSM_WAYID,
std::numeric_limits<NodeID>::max(), MAX_OSM_NODEID,
std::numeric_limits<NodeID>::max(), MAX_OSM_NODEID,
std::numeric_limits<NodeID>::max(), MAX_OSM_NODEID,
std::numeric_limits<NodeID>::max()}; MAX_OSM_NODEID};
} }
}; };

View File

@ -63,29 +63,31 @@ struct InternalExtractorEdge
}; };
explicit InternalExtractorEdge() explicit InternalExtractorEdge()
: result(0, 0, 0, 0, false, false, false, false, : result(MIN_OSM_NODEID, MIN_OSM_NODEID, 0, 0, false, false, false, false, true,
TRAVEL_MODE_INACCESSIBLE, false) TRAVEL_MODE_INACCESSIBLE, false)
{ {
} }
explicit InternalExtractorEdge(NodeID source, explicit InternalExtractorEdge(OSMNodeID source,
NodeID target, OSMNodeID target,
NodeID name_id, NodeID name_id,
WeightData weight_data, WeightData weight_data,
bool forward, bool forward,
bool backward, bool backward,
bool roundabout, bool roundabout,
bool access_restricted, bool access_restricted,
bool startpoint,
TravelMode travel_mode, TravelMode travel_mode,
bool is_split) bool is_split)
: result(source, : result(OSMNodeID(source),
target, OSMNodeID(target),
name_id, name_id,
0, 0,
forward, forward,
backward, backward,
roundabout, roundabout,
access_restricted, access_restricted,
startpoint,
travel_mode, travel_mode,
is_split), is_split),
weight_data(std::move(weight_data)) weight_data(std::move(weight_data))
@ -93,7 +95,7 @@ struct InternalExtractorEdge
} }
// data that will be written to disk // data that will be written to disk
NodeBasedEdge result; NodeBasedEdgeWithOSM result;
// intermediate edge weight // intermediate edge weight
WeightData weight_data; WeightData weight_data;
// coordinate of the source node // coordinate of the source node
@ -101,19 +103,35 @@ struct InternalExtractorEdge
// necessary static util functions for stxxl's sorting // necessary static util functions for stxxl's sorting
static InternalExtractorEdge min_value() static InternalExtractorEdge min_osm_value()
{ {
return InternalExtractorEdge(0, 0, 0, WeightData(), false, false, false, return InternalExtractorEdge(MIN_OSM_NODEID, MIN_OSM_NODEID, 0, WeightData(), false, false, false,
false, TRAVEL_MODE_INACCESSIBLE, false); false, true, TRAVEL_MODE_INACCESSIBLE, false);
} }
static InternalExtractorEdge max_value() static InternalExtractorEdge max_osm_value()
{ {
return InternalExtractorEdge(SPECIAL_NODEID, SPECIAL_NODEID, 0, WeightData(), false, return InternalExtractorEdge(MAX_OSM_NODEID, MAX_OSM_NODEID, 0, WeightData(), false,
false, false, false, TRAVEL_MODE_INACCESSIBLE, false); false, false, false, true, TRAVEL_MODE_INACCESSIBLE, false);
} }
static InternalExtractorEdge min_internal_value()
{
auto v = min_osm_value();
v.result.source = 0;
v.result.target = 0;
return v;
}
static InternalExtractorEdge max_internal_value()
{
auto v = max_osm_value();
v.result.source = std::numeric_limits<NodeID>::max();
v.result.target = std::numeric_limits<NodeID>::max();
return v;
}
}; };
struct CmpEdgeByStartThenTargetID struct CmpEdgeByInternalStartThenInternalTargetID
{ {
using value_type = InternalExtractorEdge; using value_type = InternalExtractorEdge;
bool operator()(const InternalExtractorEdge &lhs, const InternalExtractorEdge &rhs) const bool operator()(const InternalExtractorEdge &lhs, const InternalExtractorEdge &rhs) const
@ -123,32 +141,32 @@ struct CmpEdgeByStartThenTargetID
(lhs.result.target < rhs.result.target)); (lhs.result.target < rhs.result.target));
} }
value_type max_value() { return InternalExtractorEdge::max_value(); } value_type max_value() { return InternalExtractorEdge::max_internal_value(); }
value_type min_value() { return InternalExtractorEdge::min_value(); } value_type min_value() { return InternalExtractorEdge::min_internal_value(); }
}; };
struct CmpEdgeByStartID struct CmpEdgeByOSMStartID
{ {
using value_type = InternalExtractorEdge; using value_type = InternalExtractorEdge;
bool operator()(const InternalExtractorEdge &lhs, const InternalExtractorEdge &rhs) const bool operator()(const InternalExtractorEdge &lhs, const InternalExtractorEdge &rhs) const
{ {
return lhs.result.source < rhs.result.source; return lhs.result.osm_source_id < rhs.result.osm_source_id;
} }
value_type max_value() { return InternalExtractorEdge::max_value(); } value_type max_value() { return InternalExtractorEdge::max_osm_value(); }
value_type min_value() { return InternalExtractorEdge::min_value(); } value_type min_value() { return InternalExtractorEdge::min_osm_value(); }
}; };
struct CmpEdgeByTargetID struct CmpEdgeByOSMTargetID
{ {
using value_type = InternalExtractorEdge; using value_type = InternalExtractorEdge;
bool operator()(const InternalExtractorEdge &lhs, const InternalExtractorEdge &rhs) const bool operator()(const InternalExtractorEdge &lhs, const InternalExtractorEdge &rhs) const
{ {
return lhs.result.target < rhs.result.target; return lhs.result.osm_target_id < rhs.result.osm_target_id;
} }
value_type max_value() { return InternalExtractorEdge::max_value(); } value_type max_value() { return InternalExtractorEdge::max_osm_value(); }
value_type min_value() { return InternalExtractorEdge::min_value(); } value_type min_value() { return InternalExtractorEdge::min_osm_value(); }
}; };
#endif // INTERNAL_EXTRACTOR_EDGE_HPP #endif // INTERNAL_EXTRACTOR_EDGE_HPP

View File

View File

@ -38,17 +38,20 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <boost/algorithm/string/predicate.hpp> #include <boost/algorithm/string/predicate.hpp>
#include <boost/ref.hpp> #include <boost/ref.hpp>
#include <boost/regex.hpp> #include <boost/regex.hpp>
#include <boost/optional/optional.hpp>
#include <osmium/osm.hpp>
#include <osmium/tags/regex_filter.hpp>
#include <algorithm> #include <algorithm>
#include <iterator>
namespace namespace
{ {
int lua_error_callback(lua_State *lua_state) int lua_error_callback(lua_State *lua_state)
{ {
std::string error_msg = lua_tostring(lua_state, -1); std::string error_msg = lua_tostring(lua_state, -1);
std::ostringstream error_stream; throw osrm::exception("ERROR occured in profile script:\n" + error_msg);
error_stream << error_msg;
throw osrm::exception("ERROR occured in profile script:\n" + error_stream.str());
} }
} }
@ -104,18 +107,18 @@ void RestrictionParser::ReadRestrictionExceptions(lua_State *lua_state)
/** /**
* Tries to parse an relation as turn restriction. This can fail for a number of * Tries to parse an relation as turn restriction. This can fail for a number of
* reasons, this the return type is a mapbox::util::optional<>. * reasons, this the return type is a boost::optional<T>.
* *
* Some restrictions can also be ignored: See the ```get_exceptions``` function * Some restrictions can also be ignored: See the ```get_exceptions``` function
* in the corresponding profile. * in the corresponding profile.
*/ */
mapbox::util::optional<InputRestrictionContainer> boost::optional<InputRestrictionContainer>
RestrictionParser::TryParse(const osmium::Relation &relation) const RestrictionParser::TryParse(const osmium::Relation &relation) const
{ {
// return if turn restrictions should be ignored // return if turn restrictions should be ignored
if (!use_turn_restrictions) if (!use_turn_restrictions)
{ {
return mapbox::util::optional<InputRestrictionContainer>(); return {};
} }
osmium::tags::KeyPrefixFilter filter(false); osmium::tags::KeyPrefixFilter filter(false);
@ -129,14 +132,14 @@ RestrictionParser::TryParse(const osmium::Relation &relation) const
// if it's a restriction, continue; // if it's a restriction, continue;
if (std::distance(fi_begin, fi_end) == 0) if (std::distance(fi_begin, fi_end) == 0)
{ {
return mapbox::util::optional<InputRestrictionContainer>(); return {};
} }
// check if the restriction should be ignored // check if the restriction should be ignored
const char *except = relation.get_value_by_key("except"); const char *except = relation.get_value_by_key("except");
if (except != nullptr && ShouldIgnoreRestriction(except)) if (except != nullptr && ShouldIgnoreRestriction(except))
{ {
return mapbox::util::optional<InputRestrictionContainer>(); return {};
} }
bool is_only_restriction = false; bool is_only_restriction = false;
@ -164,7 +167,7 @@ RestrictionParser::TryParse(const osmium::Relation &relation) const
if (!is_actually_restricted) if (!is_actually_restricted)
{ {
return mapbox::util::optional<InputRestrictionContainer>(); return {};
} }
} }
} }
@ -218,7 +221,7 @@ RestrictionParser::TryParse(const osmium::Relation &relation) const
break; break;
} }
} }
return mapbox::util::optional<InputRestrictionContainer>(restriction_container); return boost::make_optional(std::move(restriction_container));
} }
bool RestrictionParser::ShouldIgnoreRestriction(const std::string &except_tag_string) const bool RestrictionParser::ShouldIgnoreRestriction(const std::string &except_tag_string) const

View File

@ -30,16 +30,16 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../data_structures/restriction.hpp" #include "../data_structures/restriction.hpp"
#include <osmium/osm.hpp> #include <boost/optional/optional.hpp>
#include <osmium/tags/regex_filter.hpp>
#include <variant/optional.hpp>
#include <string> #include <string>
#include <vector> #include <vector>
struct lua_State; struct lua_State;
class ScriptingEnvironment; namespace osmium
{
class Relation;
}
/** /**
* Parses the relations that represents turn restrictions. * Parses the relations that represents turn restrictions.
@ -63,8 +63,7 @@ class RestrictionParser
{ {
public: public:
RestrictionParser(lua_State *lua_state); RestrictionParser(lua_State *lua_state);
mapbox::util::optional<InputRestrictionContainer> boost::optional<InputRestrictionContainer> TryParse(const osmium::Relation &relation) const;
TryParse(const osmium::Relation &relation) const;
private: private:
void ReadUseRestrictionsSetting(lua_State *lua_state); void ReadUseRestrictionsSetting(lua_State *lua_state);

View File

@ -117,6 +117,7 @@ void ScriptingEnvironment::init_lua_state(lua_State *lua_state)
.def_readwrite("name", &ExtractionWay::name) .def_readwrite("name", &ExtractionWay::name)
.def_readwrite("roundabout", &ExtractionWay::roundabout) .def_readwrite("roundabout", &ExtractionWay::roundabout)
.def_readwrite("is_access_restricted", &ExtractionWay::is_access_restricted) .def_readwrite("is_access_restricted", &ExtractionWay::is_access_restricted)
.def_readwrite("is_startpoint", &ExtractionWay::is_startpoint)
.def_readwrite("duration", &ExtractionWay::duration) .def_readwrite("duration", &ExtractionWay::duration)
.property("forward_mode", &ExtractionWay::get_forward_mode, .property("forward_mode", &ExtractionWay::get_forward_mode,
&ExtractionWay::set_forward_mode) &ExtractionWay::set_forward_mode)

View File

@ -0,0 +1,67 @@
@routing @maxspeed @car
Feature: Car - Max speed restrictions
OSRM will use 4/5 of the projected free-flow speed.
Background: Use specific speeds
Given the profile "car"
Given a grid size of 1000 meters
Scenario: Car - Advisory speed overwrites maxspeed
Given the node map
| a | b | c |
And the ways
| nodes | highway | maxspeed | maxspeed:advisory |
| ab | residential | 90 | 45 |
| bc | residential | | 45 |
When I route I should get
| from | to | route | speed |
| a | b | ab | 47 km/h +- 1 |
| b | c | bc | 47 km/h +- 1 |
Scenario: Car - Advisory speed overwrites forward maxspeed
Given the node map
| a | b | c |
And the ways
| nodes | highway | maxspeed:forward | maxspeed:advisory:forward |
| ab | residential | 90 | 45 |
| bc | residential | | 45 |
When I route I should get
| from | to | route | speed |
| a | b | ab | 47 km/h +- 1 |
| b | c | bc | 47 km/h +- 1 |
Scenario: Car - Advisory speed overwrites backwards maxspeed
Given the node map
| a | b | c |
And the ways
| nodes | highway | maxspeed:backward | maxspeed:advisory:backward |
| ab | residential | 90 | 45 |
| bc | residential | | 45 |
When I route I should get
| from | to | route | speed |
| b | a | ab | 47 km/h +- 1 |
| c | b | bc | 47 km/h +- 1 |
Scenario: Car - Directional advisory speeds play nice with eachother
Given the node map
| a | b | c |
And the ways
| nodes | highway | maxspeed:advisory | maxspeed:advisory:forward | maxspeed:advisory:backward |
| ab | residential | 90 | 45 | 60 |
| bc | residential | 90 | 60 | 45 |
When I route I should get
| from | to | route | speed |
| a | b | ab | 47 km/h +- 1 |
| b | a | ab | 59 km/h +- 1 |
| b | c | bc | 59 km/h +- 1 |
| c | b | bc | 47 km/h +- 1 |

40
features/car/mode.feature Normal file
View File

@ -0,0 +1,40 @@
@routing @car @mode
Feature: Car - Mode flag
Background:
Given the profile "car"
Scenario: Car - Mode when using a ferry
Given the node map
| a | b | |
| | c | d |
And the ways
| nodes | highway | route | duration |
| ab | primary | | |
| bc | | ferry | 0:01 |
| cd | primary | | |
When I route I should get
| from | to | route | turns | modes |
| a | d | ab,bc,cd | head,right,left,destination | 1,2,1 |
| d | a | cd,bc,ab | head,right,left,destination | 1,2,1 |
| c | a | bc,ab | head,left,destination | 2,1 |
| d | b | cd,bc | head,right,destination | 1,2 |
| a | c | ab,bc | head,right,destination | 1,2 |
| b | d | bc,cd | head,left,destination | 2,1 |
Scenario: Car - Snapping when using a ferry
Given the node map
| a | b | | c | d | | e | f |
And the ways
| nodes | highway | route | duration |
| ab | primary | | |
| bcde | | ferry | 0:10 |
| ef | primary | | |
When I route I should get
| from | to | route | turns | modes | time |
| c | d | bcde | head,destination | 2 | 600s |

View File

@ -1,197 +0,0 @@
@locate
Feature: Locate - return nearest node
Background:
Given the profile "testbot"
Scenario: Locate - two ways crossing
Given the node map
| | | 0 | c | 1 | | |
| | | | | | | |
| 7 | | | n | | | 2 |
| a | | k | x | m | | b |
| 6 | | | l | | | 3 |
| | | | | | | |
| | | 5 | d | 4 | | |
And the ways
| nodes |
| axb |
| cxd |
When I request locate I should get
| in | out |
| 0 | c |
| 1 | c |
| 2 | b |
| 3 | b |
| 4 | d |
| 5 | d |
| 6 | a |
| 7 | a |
| a | a |
| b | b |
| c | c |
| d | d |
| k | x |
| l | x |
| m | x |
| n | x |
Scenario: Locate - inside a triangle
Given the node map
| | | | | | c | | | | | |
| | | | | | 7 | | | | | |
| | | | y | | | | z | | | |
| | | 5 | | 0 | | 1 | | 8 | | |
| 6 | | | 2 | | 3 | | 4 | | | 9 |
| a | | | x | | u | | w | | | b |
And the ways
| nodes |
| ab |
| bc |
| ca |
When I request locate I should get
| in | out |
| 0 | c |
| 1 | c |
| 2 | a |
| 3 | c |
| 4 | b |
| 5 | a |
| 6 | a |
| 7 | c |
| 8 | b |
| 9 | b |
| x | a |
| y | c |
| z | c |
| w | b |
Scenario: Nearest - easy-west way
Given the node map
| 3 | 4 | | 5 | 6 |
| 2 | a | x | b | 7 |
| 1 | 0 | | 9 | 8 |
And the ways
| nodes |
| ab |
When I request locate I should get
| in | out |
| 0 | a |
| 1 | a |
| 2 | a |
| 3 | a |
| 4 | a |
| 5 | b |
| 6 | b |
| 7 | b |
| 8 | b |
| 9 | b |
Scenario: Nearest - north-south way
Given the node map
| 1 | 2 | 3 |
| 0 | a | 4 |
| | x | |
| 9 | b | 5 |
| 8 | 7 | 6 |
And the ways
| nodes |
| ab |
When I request locate I should get
| in | out |
| 0 | a |
| 1 | a |
| 2 | a |
| 3 | a |
| 4 | a |
| 5 | b |
| 6 | b |
| 7 | b |
| 8 | b |
| 9 | b |
Scenario: Nearest - diagonal 1
Given the node map
| 2 | | 3 | | | |
| | a | | 4 | | |
| 1 | | x | | 5 | |
| | 0 | | y | | 6 |
| | | 9 | | b | |
| | | | 8 | | 7 |
And the ways
| nodes |
| axyb |
When I request locate I should get
| in | out |
| 0 | x |
| 1 | a |
| 2 | a |
| 3 | a |
| 4 | x |
| 5 | y |
| 6 | b |
| 7 | b |
| 8 | b |
| 9 | y |
| a | a |
| b | b |
| x | x |
| y | y |
Scenario: Nearest - diagonal 2
Given the node map
| | | | 6 | | 7 |
| | | 5 | | b | |
| | 4 | | y | | 8 |
| 3 | | x | | 9 | |
| | a | | 0 | | |
| 2 | | 1 | | | |
And the ways
| nodes |
| ab |
When I request nearest I should get
| in | out |
| 0 | x |
| 1 | a |
| 2 | a |
| 3 | a |
| 4 | x |
| 5 | y |
| 6 | b |
| 7 | b |
| 8 | b |
| 9 | y |
| a | a |
| b | b |
| x | x |
| y | y |
Scenario: Locate - High lat/lon
Given the node locations
| node | lat | lon |
| a | -85 | -180 |
| b | 0 | 0 |
| c | 85 | 180 |
| x | -84 | -180 |
| y | 84 | 180 |
And the ways
| nodes |
| abc |
When I request locate I should get
| in | out |
| x | a |
| y | c |

View File

@ -15,7 +15,9 @@ Feature: osrm-extract command line options: help
And stdout should contain "Configuration:" And stdout should contain "Configuration:"
And stdout should contain "--profile" And stdout should contain "--profile"
And stdout should contain "--threads" And stdout should contain "--threads"
And stdout should contain 12 lines And stdout should contain "--generate-edge-lookup"
And stdout should contain "--small-component-size"
And stdout should contain 20 lines
And it should exit with code 0 And it should exit with code 0
Scenario: osrm-extract - Help, short Scenario: osrm-extract - Help, short
@ -29,7 +31,9 @@ Feature: osrm-extract command line options: help
And stdout should contain "Configuration:" And stdout should contain "Configuration:"
And stdout should contain "--profile" And stdout should contain "--profile"
And stdout should contain "--threads" And stdout should contain "--threads"
And stdout should contain 12 lines And stdout should contain "--generate-edge-lookup"
And stdout should contain "--small-component-size"
And stdout should contain 20 lines
And it should exit with code 0 And it should exit with code 0
Scenario: osrm-extract - Help, long Scenario: osrm-extract - Help, long
@ -43,5 +47,7 @@ Feature: osrm-extract command line options: help
And stdout should contain "Configuration:" And stdout should contain "Configuration:"
And stdout should contain "--profile" And stdout should contain "--profile"
And stdout should contain "--threads" And stdout should contain "--threads"
And stdout should contain 12 lines And stdout should contain "--generate-edge-lookup"
And stdout should contain "--small-component-size"
And stdout should contain 20 lines
And it should exit with code 0 And it should exit with code 0

View File

@ -13,11 +13,12 @@ Feature: osrm-prepare command line options: help
And stdout should contain "--help" And stdout should contain "--help"
And stdout should contain "--config" And stdout should contain "--config"
And stdout should contain "Configuration:" And stdout should contain "Configuration:"
And stdout should contain "--restrictions"
And stdout should contain "--profile" And stdout should contain "--profile"
And stdout should contain "--threads" And stdout should contain "--threads"
And stdout should contain "--core" And stdout should contain "--core"
And stdout should contain 17 lines And stdout should contain "--level-cache"
And stdout should contain "--segment-speed-file"
And stdout should contain 21 lines
And it should exit with code 1 And it should exit with code 1
Scenario: osrm-prepare - Help, short Scenario: osrm-prepare - Help, short
@ -29,11 +30,12 @@ Feature: osrm-prepare command line options: help
And stdout should contain "--help" And stdout should contain "--help"
And stdout should contain "--config" And stdout should contain "--config"
And stdout should contain "Configuration:" And stdout should contain "Configuration:"
And stdout should contain "--restrictions"
And stdout should contain "--profile" And stdout should contain "--profile"
And stdout should contain "--threads" And stdout should contain "--threads"
And stdout should contain "--core" And stdout should contain "--core"
And stdout should contain 17 lines And stdout should contain "--level-cache"
And stdout should contain "--segment-speed-file"
And stdout should contain 21 lines
And it should exit with code 0 And it should exit with code 0
Scenario: osrm-prepare - Help, long Scenario: osrm-prepare - Help, long
@ -45,9 +47,10 @@ Feature: osrm-prepare command line options: help
And stdout should contain "--help" And stdout should contain "--help"
And stdout should contain "--config" And stdout should contain "--config"
And stdout should contain "Configuration:" And stdout should contain "Configuration:"
And stdout should contain "--restrictions"
And stdout should contain "--profile" And stdout should contain "--profile"
And stdout should contain "--threads" And stdout should contain "--threads"
And stdout should contain "--core" And stdout should contain "--core"
And stdout should contain 17 lines And stdout should contain "--level-cache"
And stdout should contain "--segment-speed-file"
And stdout should contain 21 lines
And it should exit with code 0 And it should exit with code 0

View File

@ -25,9 +25,11 @@ Feature: osrm-routed command line options: help
And stdout should contain "--port" And stdout should contain "--port"
And stdout should contain "--threads" And stdout should contain "--threads"
And stdout should contain "--shared-memory" And stdout should contain "--shared-memory"
And stdout should contain "--max-viaroute-size"
And stdout should contain "--max-trip-size"
And stdout should contain "--max-table-size" And stdout should contain "--max-table-size"
And stdout should contain "--max-matching-size" And stdout should contain "--max-matching-size"
And stdout should contain 26 lines And stdout should contain 30 lines
And it should exit with code 0 And it should exit with code 0
Scenario: osrm-routed - Help, short Scenario: osrm-routed - Help, short
@ -51,9 +53,11 @@ Feature: osrm-routed command line options: help
And stdout should contain "--port" And stdout should contain "--port"
And stdout should contain "--threads" And stdout should contain "--threads"
And stdout should contain "--shared-memory" And stdout should contain "--shared-memory"
And stdout should contain "--max-viaroute-size"
And stdout should contain "--max-trip-size"
And stdout should contain "--max-table-size" And stdout should contain "--max-table-size"
And stdout should contain "--max-matching-size" And stdout should contain "--max-matching-size"
And stdout should contain 26 lines And stdout should contain 30 lines
And it should exit with code 0 And it should exit with code 0
Scenario: osrm-routed - Help, long Scenario: osrm-routed - Help, long
@ -77,7 +81,9 @@ Feature: osrm-routed command line options: help
And stdout should contain "--port" And stdout should contain "--port"
And stdout should contain "--threads" And stdout should contain "--threads"
And stdout should contain "--shared-memory" And stdout should contain "--shared-memory"
And stdout should contain "--max-trip-size"
And stdout should contain "--max-table-size"
And stdout should contain "--max-table-size" And stdout should contain "--max-table-size"
And stdout should contain "--max-matching-size" And stdout should contain "--max-matching-size"
And stdout should contain 26 lines And stdout should contain 30 lines
And it should exit with code 0 And it should exit with code 0

View File

@ -6,6 +6,10 @@ Given(/^the import format "(.*?)"$/) do |format|
set_input_format format set_input_format format
end end
Given /^the extract extra arguments "(.*?)"$/ do |args|
set_extract_args args
end
Given /^a grid size of (\d+) meters$/ do |meters| Given /^a grid size of (\d+) meters$/ do |meters|
set_grid_size meters set_grid_size meters
end end
@ -28,7 +32,7 @@ Given /^the node map$/ do |table|
raise "*** invalid node name '#{name}', must me alphanumeric" unless name.match /[a-z0-9]/ raise "*** invalid node name '#{name}', must me alphanumeric" unless name.match /[a-z0-9]/
if name.match /[a-z]/ if name.match /[a-z]/
raise "*** duplicate node '#{name}'" if name_node_hash[name] raise "*** duplicate node '#{name}'" if name_node_hash[name]
add_osm_node name, *table_coord_to_lonlat(ci,ri) add_osm_node name, *table_coord_to_lonlat(ci,ri), nil
else else
raise "*** duplicate node '#{name}'" if location_hash[name] raise "*** duplicate node '#{name}'" if location_hash[name]
add_location name, *table_coord_to_lonlat(ci,ri) add_location name, *table_coord_to_lonlat(ci,ri)
@ -43,7 +47,9 @@ Given /^the node locations$/ do |table|
name = row['node'] name = row['node']
raise "*** duplicate node '#{name}'" if find_node_by_name name raise "*** duplicate node '#{name}'" if find_node_by_name name
if name.match /[a-z]/ if name.match /[a-z]/
add_osm_node name, row['lon'].to_f, row['lat'].to_f id = row['id']
id = id.to_i if id
add_osm_node name, row['lon'].to_f, row['lat'].to_f, id
else else
add_location name, row['lon'].to_f, row['lat'].to_f add_location name, row['lon'].to_f, row['lat'].to_f
end end

View File

@ -1,19 +1,29 @@
When /^I request a travel time matrix I should get$/ do |table| When /^I request a travel time matrix I should get$/ do |table|
no_route = 2147483647 # MAX_INT no_route = 2147483647 # MAX_INT
raise "*** Top-left cell of matrix table must be empty" unless table.headers[0]=="" raise "*** Top-left cell of matrix table must be empty" unless table.headers[0]==""
nodes = [] waypoints = []
column_headers = table.headers[1..-1] column_headers = table.headers[1..-1]
row_headers = table.rows.map { |h| h.first } row_headers = table.rows.map { |h| h.first }
unless column_headers==row_headers symmetric = Set.new(column_headers) == Set.new(row_headers)
raise "*** Column and row headers must match in matrix table, got #{column_headers.inspect} and #{row_headers.inspect}" if symmetric then
end column_headers.each do |node_name|
column_headers.each do |node_name| node = find_node_by_name(node_name)
node = find_node_by_name(node_name) raise "*** unknown node '#{node_name}" unless node
raise "*** unknown node '#{node_name}" unless node waypoints << {:coord => node, :type => "loc"}
nodes << node end
else
column_headers.each do |node_name|
node = find_node_by_name(node_name)
raise "*** unknown node '#{node_name}" unless node
waypoints << {:coord => node, :type => "dst"}
end
row_headers.each do |node_name|
node = find_node_by_name(node_name)
raise "*** unknown node '#{node_name}" unless node
waypoints << {:coord => node, :type => "src"}
end
end end
reprocess reprocess
@ -23,18 +33,18 @@ When /^I request a travel time matrix I should get$/ do |table|
# compute matrix # compute matrix
params = @query_params params = @query_params
response = request_table nodes, params response = request_table waypoints, params
if response.body.empty? == false if response.body.empty? == false
json = JSON.parse response.body json_result = JSON.parse response.body
result = json['distance_table'] result = json_result["distance_table"]
end end
# compare actual and expected result, one row at a time # compare actual and expected result, one row at a time
table.rows.each_with_index do |row,ri| table.rows.each_with_index do |row,ri|
# fuzzy match # fuzzy match
ok = true ok = true
0.upto(nodes.size-1) do |i| 0.upto(result[ri].size-1) do |i|
if FuzzyMatch.match result[ri][i], row[i+1] if FuzzyMatch.match result[ri][i], row[i+1]
result[ri][i] = row[i+1] result[ri][i] = row[i+1]
elsif row[i+1]=="" and result[ri][i]==no_route elsif row[i+1]=="" and result[ri][i]==no_route

View File

@ -1,51 +0,0 @@
When /^I request locate I should get$/ do |table|
reprocess
actual = []
OSRMLoader.load(self,"#{prepared_file}.osrm") do
table.hashes.each_with_index do |row,ri|
in_node = find_node_by_name row['in']
raise "*** unknown in-node '#{row['in']}" unless in_node
out_node = find_node_by_name row['out']
raise "*** unknown out-node '#{row['out']}" unless out_node
response = request_locate(in_node)
if response.code == "200" && response.body.empty? == false
json = JSON.parse response.body
if json['status'] == 0
coord = json['mapped_coordinate']
end
end
got = {'in' => row['in'], 'out' => coord }
ok = true
row.keys.each do |key|
if key=='out'
if FuzzyMatch.match_location coord, out_node
got[key] = row[key]
else
row[key] = "#{row[key]} [#{out_node.lat},#{out_node.lon}]"
ok = false
end
end
end
unless ok
failed = { :attempt => 'locate', :query => @query, :response => response }
log_fail row,got,[failed]
end
actual << got
end
end
table.diff! actual
end
When /^I request locate (\d+) times I should get$/ do |n,table|
ok = true
n.to_i.times do
ok = false unless step "I request locate I should get", table
end
ok
end

View File

@ -8,6 +8,17 @@ When /^I match I should get$/ do |table|
response = request_url row['request'] response = request_url row['request']
else else
params = @query_params params = @query_params
got = {}
row.each_pair do |k,v|
if k =~ /param:(.*)/
if v=='(nil)'
params[$1]=nil
elsif v!=nil
params[$1]=[v]
end
got[k]=v
end
end
trace = [] trace = []
timestamps = [] timestamps = []
if row['trace'] if row['trace']
@ -19,24 +30,13 @@ When /^I match I should get$/ do |table|
if row['timestamps'] if row['timestamps']
timestamps = row['timestamps'].split(" ").compact.map { |t| t.to_i} timestamps = row['timestamps'].split(" ").compact.map { |t| t.to_i}
end end
got = {'trace' => row['trace'] } got = got.merge({'trace' => row['trace'] })
response = request_matching trace, timestamps, params response = request_matching trace, timestamps, params
else else
raise "*** no trace" raise "*** no trace"
end end
end end
row.each_pair do |k,v|
if k =~ /param:(.*)/
if v=='(nil)'
params[$1]=nil
elsif v!=nil
params[$1]=v
end
got[k]=v
end
end
if response.body.empty? == false if response.body.empty? == false
json = JSON.parse response.body json = JSON.parse response.body
end end
@ -52,178 +52,35 @@ When /^I match I should get$/ do |table|
end end
sub_matchings = [] sub_matchings = []
turns = ''
route = ''
duration = ''
if response.code == "200" if response.code == "200"
if table.headers.include? 'matchings' if table.headers.include? 'matchings'
sub_matchings = json['matchings'].compact.map { |sub| sub['matched_points']} sub_matchings = json['matchings'].compact.map { |sub| sub['matched_points']}
end end
end if table.headers.include? 'turns'
raise "*** Checking turns only support for matchings with one subtrace" unless json['matchings'].size == 1
ok = true
encoded_result = ""
extended_target = ""
row['matchings'].split(',').each_with_index do |sub, sub_idx|
if sub_idx >= sub_matchings.length
ok = false
break
end
sub.length.times do |node_idx|
node = find_node_by_name(sub[node_idx])
out_node = sub_matchings[sub_idx][node_idx]
if FuzzyMatch.match_location out_node, node
encoded_result += sub[node_idx]
extended_target += sub[node_idx]
else
encoded_result += "? [#{out_node[0]},#{out_node[1]}]"
extended_target += "#{sub[node_idx]} [#{node.lat},#{node.lon}]"
ok = false
end
end
end
if ok
got['matchings'] = row['matchings']
got['timestamps'] = row['timestamps']
else
got['matchings'] = encoded_result
row['matchings'] = extended_target
log_fail row,got, { 'matching' => {:query => @query, :response => response} }
end
actual << got
end
end
table.diff! actual
end
When /^I match with turns I should get$/ do |table|
reprocess
actual = []
OSRMLoader.load(self,"#{prepared_file}.osrm") do
table.hashes.each_with_index do |row,ri|
if row['request']
got = {'request' => row['request'] }
response = request_url row['request']
else
params = @query_params
trace = []
timestamps = []
if row['from'] and row['to']
node = find_node_by_name(row['from'])
raise "*** unknown from-node '#{row['from']}" unless node
trace << node
node = find_node_by_name(row['to'])
raise "*** unknown to-node '#{row['to']}" unless node
trace << node
got = {'from' => row['from'], 'to' => row['to'] }
response = request_matching trace, timestamps, params
elsif row['trace']
row['trace'].each_char do |n|
node = find_node_by_name(n.strip)
raise "*** unknown waypoint node '#{n.strip}" unless node
trace << node
end
if row['timestamps']
timestamps = row['timestamps'].split(" ").compact.map { |t| t.to_i}
end
got = {'trace' => row['trace'] }
response = request_matching trace, timestamps, params
else
raise "*** no trace"
end
end
row.each_pair do |k,v|
if k =~ /param:(.*)/
if v=='(nil)'
params[$1]=nil
elsif v!=nil
params[$1]=v
end
got[k]=v
end
end
if response.body.empty? == false
json = JSON.parse response.body
end
if response.body.empty? == false
if response.code == "200"
instructions = way_list json['matchings'][0]['instructions']
bearings = bearing_list json['matchings'][0]['instructions']
compasses = compass_list json['matchings'][0]['instructions']
turns = turn_list json['matchings'][0]['instructions'] turns = turn_list json['matchings'][0]['instructions']
modes = mode_list json['matchings'][0]['instructions']
times = time_list json['matchings'][0]['instructions']
distances = distance_list json['matchings'][0]['instructions']
end end
end if table.headers.include? 'route'
raise "*** Checking route only support for matchings with one subtrace" unless json['matchings'].size == 1
if table.headers.include? 'status' route = way_list json['matchings'][0]['instructions']
got['status'] = json['status'].to_s if table.headers.include? 'duration'
end raise "*** Checking duration only support for matchings with one subtrace" unless json['matchings'].size == 1
if table.headers.include? 'message' duration = json['matchings'][0]['route_summary']['total_time']
got['message'] = json['status_message'] end
end end
if table.headers.include? '#' # comment column
got['#'] = row['#'] # copy value so it always match
end end
sub_matchings = [] if table.headers.include? 'turns'
if response.code == "200" got['turns'] = turns
if table.headers.include? 'matchings' end
sub_matchings = json['matchings'].compact.map { |sub| sub['matched_points']} if table.headers.include? 'route'
got['route'] = route
got['route'] = (instructions || '').strip end
if table.headers.include?('distance') if table.headers.include? 'duration'
if row['distance']!='' got['duration'] = duration.to_s
raise "*** Distance must be specied in meters. (ex: 250m)" unless row['distance'] =~ /\d+m/
end
got['distance'] = instructions ? "#{json['route_summary']['total_distance'].to_s}m" : ''
end
if table.headers.include?('time')
raise "*** Time must be specied in seconds. (ex: 60s)" unless row['time'] =~ /\d+s/
got['time'] = instructions ? "#{json['route_summary']['total_time'].to_s}s" : ''
end
if table.headers.include?('speed')
if row['speed'] != '' && instructions
raise "*** Speed must be specied in km/h. (ex: 50 km/h)" unless row['speed'] =~ /\d+ km\/h/
time = json['route_summary']['total_time']
distance = json['route_summary']['total_distance']
speed = time>0 ? (3.6*distance/time).to_i : nil
got['speed'] = "#{speed} km/h"
else
got['speed'] = ''
end
end
if table.headers.include? 'bearing'
got['bearing'] = instructions ? bearings : ''
end
if table.headers.include? 'compass'
got['compass'] = instructions ? compasses : ''
end
if table.headers.include? 'turns'
got['turns'] = instructions ? turns : ''
end
if table.headers.include? 'modes'
got['modes'] = instructions ? modes : ''
end
if table.headers.include? 'times'
got['times'] = instructions ? times : ''
end
if table.headers.include? 'distances'
got['distances'] = instructions ? distances : ''
end
end
if table.headers.include? 'start'
got['start'] = instructions ? json['route_summary']['start_point'] : nil
end
if table.headers.include? 'end'
got['end'] = instructions ? json['route_summary']['end_point'] : nil
end
if table.headers.include? 'geometry'
got['geometry'] = json['route_geometry']
end
end end
ok = true ok = true
@ -248,8 +105,12 @@ When /^I match with turns I should get$/ do |table|
end end
end end
if ok if ok
got['matchings'] = row['matchings'] if table.headers.include? 'matchings'
got['timestamps'] = row['timestamps'] got['matchings'] = row['matchings']
end
if table.headers.include? 'timestamps'
got['timestamps'] = row['timestamps']
end
else else
got['matchings'] = encoded_result got['matchings'] = encoded_result
row['matchings'] = extended_target row['matchings'] = extended_target

View File

@ -9,10 +9,10 @@ When /^I request nearest I should get$/ do |table|
out_node = find_node_by_name row['out'] out_node = find_node_by_name row['out']
raise "*** unknown out-node '#{row['out']}" unless out_node raise "*** unknown out-node '#{row['out']}" unless out_node
response = request_nearest(in_node) response = request_nearest in_node, @query_params
if response.code == "200" && response.body.empty? == false if response.code == "200" && response.body.empty? == false
json = JSON.parse response.body json = JSON.parse response.body
if json['status'] == 0 if json['status'] == 200
coord = json['mapped_coordinate'] coord = json['mapped_coordinate']
end end
end end

View File

@ -53,5 +53,5 @@ Then /^stdout should contain (\d+) lines?$/ do |lines|
end end
Given (/^the query options$/) do |table| Given (/^the query options$/) do |table|
@query_params = table.rows_hash table.rows_hash.each { |k,v| @query_params << [k, v] }
end end

View File

@ -1,7 +1,7 @@
When /^I request \/(.*)$/ do |path| When /^I request \/(.*)$/ do |path|
reprocess reprocess
OSRMLoader.load(self,"#{prepared_file}.osrm") do OSRMLoader.load(self,"#{prepared_file}.osrm") do
@response = request_path path @response = request_path path, []
end end
end end

View File

@ -4,12 +4,12 @@ def test_routability_row i
a = Location.new @origin[0]+(1+WAY_SPACING*i)*@zoom, @origin[1] a = Location.new @origin[0]+(1+WAY_SPACING*i)*@zoom, @origin[1]
b = Location.new @origin[0]+(3+WAY_SPACING*i)*@zoom, @origin[1] b = Location.new @origin[0]+(3+WAY_SPACING*i)*@zoom, @origin[1]
r = {} r = {}
r[:response] = request_route (direction=='forw' ? [a,b] : [b,a]), @query_params r[:response] = request_route (direction=='forw' ? [a,b] : [b,a]), [], @query_params
r[:query] = @query r[:query] = @query
r[:json] = JSON.parse(r[:response].body) r[:json] = JSON.parse(r[:response].body)
r[:status] = route_status r[:response] r[:status] = (route_status r[:response]) == 200 ? 'x' : nil
if r[:status].empty? == false if r[:status] then
r[:route] = way_list r[:json]['route_instructions'] r[:route] = way_list r[:json]['route_instructions']
if r[:route]=="w#{i}" if r[:route]=="w#{i}"

View File

@ -7,8 +7,26 @@ When /^I route I should get$/ do |table|
got = {'request' => row['request'] } got = {'request' => row['request'] }
response = request_url row['request'] response = request_url row['request']
else else
params = @query_params default_params = @query_params
user_params = []
got = {}
row.each_pair do |k,v|
if k =~ /param:(.*)/
if v=='(nil)'
user_params << [$1, nil]
elsif v!=nil
user_params << [$1, v]
end
got[k]=v
end
end
params = overwrite_params default_params, user_params
waypoints = [] waypoints = []
bearings = []
if row['bearings']
got['bearings'] = row['bearings']
bearings = row['bearings'].split(' ').compact
end
if row['from'] and row['to'] if row['from'] and row['to']
node = find_node_by_name(row['from']) node = find_node_by_name(row['from'])
raise "*** unknown from-node '#{row['from']}" unless node raise "*** unknown from-node '#{row['from']}" unless node
@ -18,38 +36,27 @@ When /^I route I should get$/ do |table|
raise "*** unknown to-node '#{row['to']}" unless node raise "*** unknown to-node '#{row['to']}" unless node
waypoints << node waypoints << node
got = {'from' => row['from'], 'to' => row['to'] } got = got.merge({'from' => row['from'], 'to' => row['to'] })
response = request_route waypoints, params response = request_route waypoints, bearings, params
elsif row['waypoints'] elsif row['waypoints']
row['waypoints'].split(',').each do |n| row['waypoints'].split(',').each do |n|
node = find_node_by_name(n.strip) node = find_node_by_name(n.strip)
raise "*** unknown waypoint node '#{n.strip}" unless node raise "*** unknown waypoint node '#{n.strip}" unless node
waypoints << node waypoints << node
end end
got = {'waypoints' => row['waypoints'] } got = got.merge({'waypoints' => row['waypoints'] })
response = request_route waypoints, params response = request_route waypoints, bearings, params
else else
raise "*** no waypoints" raise "*** no waypoints"
end end
end end
row.each_pair do |k,v|
if k =~ /param:(.*)/
if v=='(nil)'
params[$1]=nil
elsif v!=nil
params[$1]=v
end
got[k]=v
end
end
if response.body.empty? == false if response.body.empty? == false
json = JSON.parse response.body json = JSON.parse response.body
end end
if response.body.empty? == false if response.body.empty? == false
if json['status'] == 0 if json['status'] == 200
instructions = way_list json['route_instructions'] instructions = way_list json['route_instructions']
bearings = bearing_list json['route_instructions'] bearings = bearing_list json['route_instructions']
compasses = compass_list json['route_instructions'] compasses = compass_list json['route_instructions']
@ -70,62 +77,64 @@ When /^I route I should get$/ do |table|
got['#'] = row['#'] # copy value so it always match got['#'] = row['#'] # copy value so it always match
end end
if response.code == "200" if table.headers.include? 'start'
if table.headers.include? 'start' got['start'] = instructions ? json['route_summary']['start_point'] : nil
got['start'] = instructions ? json['route_summary']['start_point'] : nil end
end if table.headers.include? 'end'
if table.headers.include? 'end' got['end'] = instructions ? json['route_summary']['end_point'] : nil
got['end'] = instructions ? json['route_summary']['end_point'] : nil end
end if table.headers.include? 'geometry'
if table.headers.include? 'geometry' got['geometry'] = json['route_geometry']
got['geometry'] = json['route_geometry'] end
end if table.headers.include? 'route'
if table.headers.include? 'route' got['route'] = (instructions || '').strip
got['route'] = (instructions || '').strip if table.headers.include?('alternative')
if table.headers.include?('alternative') got['alternative'] =
raise "*** No alternative found ***" unless json['found_alternative'] if json['found_alternative']
got['alternative'] = way_list json['alternative_instructions'].first way_list json['alternative_instructions'].first
end
if table.headers.include?('distance')
if row['distance']!=''
raise "*** Distance must be specied in meters. (ex: 250m)" unless row['distance'] =~ /\d+m/
end
got['distance'] = instructions ? "#{json['route_summary']['total_distance'].to_s}m" : ''
end
if table.headers.include?('time')
raise "*** Time must be specied in seconds. (ex: 60s)" unless row['time'] =~ /\d+s/
got['time'] = instructions ? "#{json['route_summary']['total_time'].to_s}s" : ''
end
if table.headers.include?('speed')
if row['speed'] != '' && instructions
raise "*** Speed must be specied in km/h. (ex: 50 km/h)" unless row['speed'] =~ /\d+ km\/h/
time = json['route_summary']['total_time']
distance = json['route_summary']['total_distance']
speed = time>0 ? (3.6*distance/time).round : nil
got['speed'] = "#{speed} km/h"
else else
got['speed'] = '' ""
end end
end
if table.headers.include?('distance')
if row['distance']!=''
raise "*** Distance must be specied in meters. (ex: 250m)" unless row['distance'] =~ /\d+m/
end end
if table.headers.include? 'bearing' got['distance'] = instructions ? "#{json['route_summary']['total_distance'].to_s}m" : ''
got['bearing'] = instructions ? bearings : '' end
end if table.headers.include?('time')
if table.headers.include? 'compass' raise "*** Time must be specied in seconds. (ex: 60s)" unless row['time'] =~ /\d+s/
got['compass'] = instructions ? compasses : '' got['time'] = instructions ? "#{json['route_summary']['total_time'].to_s}s" : ''
end end
if table.headers.include? 'turns' if table.headers.include?('speed')
got['turns'] = instructions ? turns : '' if row['speed'] != '' && instructions
end raise "*** Speed must be specied in km/h. (ex: 50 km/h)" unless row['speed'] =~ /\d+ km\/h/
if table.headers.include? 'modes' time = json['route_summary']['total_time']
got['modes'] = instructions ? modes : '' distance = json['route_summary']['total_distance']
end speed = time>0 ? (3.6*distance/time).round : nil
if table.headers.include? 'times' got['speed'] = "#{speed} km/h"
got['times'] = instructions ? times : '' else
end got['speed'] = ''
if table.headers.include? 'distances'
got['distances'] = instructions ? distances : ''
end end
end end
if table.headers.include? 'bearing'
got['bearing'] = instructions ? bearings : ''
end
if table.headers.include? 'compass'
got['compass'] = instructions ? compasses : ''
end
if table.headers.include? 'turns'
got['turns'] = instructions ? turns : ''
end
if table.headers.include? 'modes'
got['modes'] = instructions ? modes : ''
end
if table.headers.include? 'times'
got['times'] = instructions ? times : ''
end
if table.headers.include? 'distances'
got['distances'] = instructions ? distances : ''
end
end end
ok = true ok = true

View File

@ -38,7 +38,7 @@ When /^I plan a trip I should get$/ do |table|
if v=='(nil)' if v=='(nil)'
params[$1]=nil params[$1]=nil
elsif v!=nil elsif v!=nil
params[$1]=v params[$1]=[v]
end end
got[k]=v got[k]=v
end end

View File

@ -10,3 +10,7 @@ end
def set_profile profile def set_profile profile
@profile = profile @profile = profile
end end
def set_extract_args args
@extract_args = args
end

View File

@ -123,8 +123,9 @@ def table_coord_to_lonlat ci,ri
[@origin[0]+ci*@zoom, @origin[1]-ri*@zoom] [@origin[0]+ci*@zoom, @origin[1]-ri*@zoom]
end end
def add_osm_node name,lon,lat def add_osm_node name,lon,lat,id
node = OSM::Node.new make_osm_id, OSM_USER, OSM_TIMESTAMP, lon, lat id = make_osm_id if id == nil
node = OSM::Node.new id, OSM_USER, OSM_TIMESTAMP, lon, lat
node << { :name => name } node << { :name => name }
node.uid = OSM_UID node.uid = OSM_UID
osm_db << node osm_db << node
@ -273,12 +274,13 @@ def extract_data
Dir.chdir TEST_FOLDER do Dir.chdir TEST_FOLDER do
log_preprocess_info log_preprocess_info
log "== Extracting #{osm_file}.osm...", :preprocess log "== Extracting #{osm_file}.osm...", :preprocess
unless system "#{BIN_PATH}/osrm-extract #{osm_file}.osm#{'.pbf' if pbf?} --profile #{PROFILES_PATH}/#{@profile}.lua >>#{PREPROCESS_LOG_FILE} 2>&1" unless system "#{BIN_PATH}/osrm-extract #{osm_file}.osm#{'.pbf' if pbf?} #{@extract_args} --profile #{PROFILES_PATH}/#{@profile}.lua >>#{PREPROCESS_LOG_FILE} 2>&1"
log "*** Exited with code #{$?.exitstatus}.", :preprocess log "*** Exited with code #{$?.exitstatus}.", :preprocess
raise ExtractError.new $?.exitstatus, "osrm-extract exited with code #{$?.exitstatus}." raise ExtractError.new $?.exitstatus, "osrm-extract exited with code #{$?.exitstatus}."
end end
begin begin
["osrm","osrm.names","osrm.restrictions"].each do |file| ["osrm","osrm.names","osrm.restrictions","osrm.ebg","osrm.edges","osrm.fileIndex","osrm.geometry","osrm.nodes","osrm.ramIndex"].each do |file|
log "Renaming #{osm_file}.#{file} to #{extracted_file}.#{file}", :preprocess
File.rename "#{osm_file}.#{file}", "#{extracted_file}.#{file}" File.rename "#{osm_file}.#{file}", "#{extracted_file}.#{file}"
end end
rescue Exception => e rescue Exception => e
@ -296,14 +298,16 @@ def prepare_data
raise PrepareError.new $?.exitstatus, "osrm-prepare exited with code #{$?.exitstatus}." raise PrepareError.new $?.exitstatus, "osrm-prepare exited with code #{$?.exitstatus}."
end end
begin begin
["osrm.hsgr","osrm.fileIndex","osrm.geometry","osrm.nodes","osrm.ramIndex","osrm.core"].each do |file| ["osrm.hsgr","osrm.fileIndex","osrm.geometry","osrm.nodes","osrm.ramIndex","osrm.core","osrm.edges"].each do |file|
log "Renaming #{extracted_file}.#{file} to #{prepared_file}.#{file}", :preprocess
File.rename "#{extracted_file}.#{file}", "#{prepared_file}.#{file}" File.rename "#{extracted_file}.#{file}", "#{prepared_file}.#{file}"
end end
rescue Exception => e rescue Exception => e
raise FileError.new nil, "failed to rename data file after preparing." raise FileError.new nil, "failed to rename data file after preparing."
end end
begin begin
["osrm.names","osrm.edges","osrm.restrictions"].each do |file| ["osrm.names","osrm.restrictions","osrm"].each do |file|
log "Copying #{extracted_file}.#{file} to #{prepared_file}.#{file}", :preprocess
FileUtils.cp "#{extracted_file}.#{file}", "#{prepared_file}.#{file}" FileUtils.cp "#{extracted_file}.#{file}", "#{prepared_file}.#{file}"
end end
rescue Exception => e rescue Exception => e

View File

@ -15,7 +15,7 @@ Before do |scenario|
end end
@load_method = DEFAULT_LOAD_METHOD @load_method = DEFAULT_LOAD_METHOD
@query_params = {} @query_params = []
@scenario_time = Time.now.strftime("%Y-%m-%dT%H:%m:%SZ") @scenario_time = Time.now.strftime("%Y-%m-%dT%H:%m:%SZ")
reset_data reset_data
@has_logged_preprocess_info = false @has_logged_preprocess_info = false

View File

@ -1,29 +1,32 @@
require 'net/http' require 'net/http'
def generate_request_url path # Converts an array [["param","val1"], ["param","val2"]] into ?param=val1&param=val2
if @http_method.eql? "POST" def params_to_url params
pos = path.index('?') - 1 kv_pairs = params.map { |kv| kv[0].to_s + "=" + kv[1].to_s }
service = path[0..pos] url = kv_pairs.size > 0 ? ("?" + kv_pairs.join("&")) : ""
uri = URI.parse "#{HOST}/#{service}" return url
else end
uri = URI.parse "#{HOST}/#{path}"
# Converts an array [["param","val1"], ["param","val2"]] into ["param"=>["val1", "val2"]]
def params_to_map params
result = {}
params.each do |pair|
if not result.has_key? pair[0]
result[pair[0]] = []
end
result[pair[0]] << [pair[1]]
end end
end end
def send_request uri, waypoints=[], options={}, timestamps=[] def send_request base_uri, parameters
@query = uri.to_s
Timeout.timeout(OSRM_TIMEOUT) do Timeout.timeout(OSRM_TIMEOUT) do
if @http_method.eql? "POST" if @http_method.eql? "POST"
datas = {} uri = URI.parse base_uri
if waypoints.length > 0 @query = uri.to_s
datas[:loc] = waypoints.compact.map { |w| "#{w.lat},#{w.lon}" } response = Net::HTTP.post_form uri, (params_to_map parameters)
end
if timestamps.length > 0
datas[:t] = timestamps.compact.map { |t| "#{t}" }
end
datas.merge! options
response = Net::HTTP.post_form uri, datas
else else
uri = URI.parse base_uri+(params_to_url parameters)
@query = uri.to_s
response = Net::HTTP.get_response uri response = Net::HTTP.get_response uri
end end
end end

View File

@ -1,12 +0,0 @@
require 'net/http'
def request_locate_url path, node
@query = path
uri = generate_request_url path
response = send_request uri, [node]
end
def request_locate node
request_locate_url "locate?loc=#{node.lat},#{node.lon}", node
end

View File

@ -1,20 +0,0 @@
require 'net/http'
HOST = "http://127.0.0.1:#{OSRM_PORT}"
def request_matching trace=[], timestamps=[], options={}
defaults = { 'output' => 'json', 'instructions' => 'true' }
locs = trace.compact.map { |w| "loc=#{w.lat},#{w.lon}" }
ts = timestamps.compact.map { |t| "t=#{t}" }
if ts.length > 0
trace_params = locs.zip(ts).map { |a| a.join('&')}
else
trace_params = locs
end
params = (trace_params + defaults.merge(options).to_param).join('&')
params = nil if params==""
uri = generate_request_url ("match" + '?' + params)
response = send_request uri, trace, options, timestamps
end

View File

@ -1,12 +0,0 @@
require 'net/http'
def request_nearest_url path, node
@query = path
uri = generate_request_url path
response = send_request uri, [node]
end
def request_nearest node
request_nearest_url "nearest?loc=#{node.lat},#{node.lon}", node
end

View File

@ -3,25 +3,10 @@ require 'net/http'
HOST = "http://127.0.0.1:#{OSRM_PORT}" HOST = "http://127.0.0.1:#{OSRM_PORT}"
DESTINATION_REACHED = 15 #OSRM instruction code DESTINATION_REACHED = 15 #OSRM instruction code
class Hash def request_path service, params
def to_param(namespace = nil) uri = "#{HOST}/" + service
collect do |key, value| response = send_request uri, params
"#{key}=#{value}" return response
end.sort
end
end
def request_path path, waypoints=[], options={}
locs = waypoints.compact.map { |w| "loc=#{w.lat},#{w.lon}" }
params = (locs + options.to_param).join('&')
params = nil if params==""
if params == nil
uri = generate_request_url (path)
else
uri = generate_request_url (path + '?' + params)
end
response = send_request uri, waypoints, options
end end
def request_url path def request_url path
@ -36,40 +21,95 @@ rescue Timeout::Error
raise "*** osrm-routed did not respond." raise "*** osrm-routed did not respond."
end end
def request_route waypoints, params={} # Overwriters the default values in defaults.
defaults = { 'output' => 'json', 'instructions' => true, 'alt' => false } # e.g. [[a, 1], [b, 2]], [[a, 5], [d, 10]] => [[a, 5], [b, 2], [d, 10]]
request_path "viaroute", waypoints, defaults.merge(params) def overwrite_params defaults, other
merged = []
defaults.each do |k,v|
idx = other.index { |p| p[0] == k }
if idx == nil then
merged << [k, v]
else
merged << [k, other[idx][1]]
end
end
other.each do |k,v|
if merged.index { |pair| pair[0] == k} == nil then
merged << [k, v]
end
end
return merged
end end
def request_table waypoints, params={} def request_route waypoints, bearings, user_params
defaults = { 'output' => 'json' } raise "*** number of bearings does not equal the number of waypoints" unless bearings.size == 0 || bearings.size == waypoints.size
request_path "table", waypoints, defaults.merge(params)
defaults = [['output','json'], ['instructions',true], ['alt',false]]
params = overwrite_params defaults, user_params
encoded_waypoint = waypoints.map{ |w| ["loc","#{w.lat},#{w.lon}"] }
if bearings.size > 0
encoded_bearings = bearings.map { |b| ["b", b.to_s]}
parasm = params.concat encoded_waypoint.zip(encoded_bearings).flatten! 1
else
params = params.concat encoded_waypoint
end
return request_path "viaroute", params
end
def request_nearest node, user_params
defaults = [['output', 'json']]
params = overwrite_params defaults, user_params
params << ["loc", "#{node.lat},#{node.lon}"]
return request_path "nearest", params
end
def request_table waypoints, user_params
defaults = [['output', 'json']]
params = overwrite_params defaults, user_params
params = params.concat waypoints.map{ |w| [w[:type],"#{w[:coord].lat},#{w[:coord].lon}"] }
return request_path "table", params
end
def request_trip waypoints, user_params
defaults = [['output', 'json']]
params = overwrite_params defaults, user_params
params = params.concat waypoints.map{ |w| ["loc","#{w.lat},#{w.lon}"] }
return request_path "trip", params
end
def request_matching waypoints, timestamps, user_params
defaults = [['output', 'json']]
params = overwrite_params defaults, user_params
encoded_waypoint = waypoints.map{ |w| ["loc","#{w.lat},#{w.lon}"] }
if timestamps.size > 0
encoded_timestamps = timestamps.map { |t| ["t", t.to_s]}
parasm = params.concat encoded_waypoint.zip(encoded_timestamps).flatten! 1
else
params = params.concat encoded_waypoint
end
return request_path "match", params
end end
def got_route? response def got_route? response
if response.code == "200" && !response.body.empty? if response.code == "200" && !response.body.empty?
json = JSON.parse response.body json = JSON.parse response.body
if json['status'] == 0 if json['status'] == 200
return way_list( json['route_instructions']).empty? == false return way_list( json['route_instructions']).empty? == false
end end
end end
false return false
end end
def route_status response def route_status response
if response.code == "200" && !response.body.empty? if response.code == "200" && !response.body.empty?
json = JSON.parse response.body json = JSON.parse response.body
if json['status'] == 0 return json['status']
if way_list( json['route_instructions']).empty?
return 'Empty route'
else
return 'x'
end
elsif json['status'] == 207
''
else
"Status #{json['status']}"
end
else else
"HTTP #{response.code}" "HTTP #{response.code}"
end end

View File

@ -1,14 +0,0 @@
require 'net/http'
HOST = "http://127.0.0.1:#{OSRM_PORT}"
def request_trip waypoints=[], params={}
defaults = { 'output' => 'json' }
locs = waypoints.compact.map { |w| "loc=#{w.lat},#{w.lon}" }
params = (locs + defaults.merge(params).to_param).join('&')
params = nil if params==""
uri = generate_request_url ("trip" + '?' + params)
response = send_request uri, waypoints, params
end

View File

@ -0,0 +1,23 @@
@testbot
Feature: Support 64bit node IDs
# Without 64bit support, this test should fail
Scenario: 64bit overflow conflicts
Given the node locations
| node | lat | lon | id |
| a | 55.660778 | 12.573909 | 1 |
| b | 55.660672 | 12.573693 | 2 |
| c | 55.660128 | 12.572546 | 3 |
| d | 55.660015 | 12.572476 | 4294967297 |
| e | 55.660119 | 12.572325 | 4294967298 |
| x | 55.660818 | 12.574051 | 4294967299 |
| y | 55.660073 | 12.574067 | 4294967300 |
And the ways
| nodes |
| abc |
| cdec |
When I route I should get
| from | to | route | turns |
| x | y | abc | head,destination |

View File

@ -0,0 +1,38 @@
@routing @testbot @alternative
Feature: Alternative route
Background:
Given the profile "testbot"
And the node map
| | b | c | d | | |
| a | | | | | z |
| | g | h | i | j | |
And the ways
| nodes |
| ab |
| bc |
| cd |
| dz |
| ag |
| gh |
| hi |
| ij |
| jz |
Scenario: Enabled alternative
Given the query options
| alt | true |
When I route I should get
| from | to | route | alternative |
| a | z | ab,bc,cd,dz | ag,gh,hi,ij,jz |
Scenario: Disabled alternative
Given the query options
| alt | false |
When I route I should get
| from | to | route | alternative |
| a | z | ab,bc,cd,dz | |

View File

@ -1,10 +1,26 @@
@routing @bearing_param @todo @testbot @routing @bearing_param @testbot
Feature: Bearing parameter Feature: Bearing parameter
Background: Background:
Given the profile "testbot" Given the profile "testbot"
And a grid size of 10 meters And a grid size of 10 meters
Scenario: Testbot - Intial bearing in simple case
Given the node map
| a | b | c | d |
And the ways
| nodes |
| ad |
When I route I should get
| from | to | bearings | route | bearing |
| b | c | 90 90 | ad | 90 |
| b | c | 180 90 | | |
| b | c | 80 100 | ad | 90 |
| b | c | 79 100 | | |
| b | c | 79,11 100 | ad | 90 |
Scenario: Testbot - Intial bearing in simple case Scenario: Testbot - Intial bearing in simple case
Given the node map Given the node map
| a | | | a | |
@ -17,13 +33,13 @@ Feature: Bearing parameter
| bc | | bc |
When I route I should get When I route I should get
| from | to | param:bearing | route | bearing | | from | to | bearings | route | bearing |
| 0 | c | 0 | bc | 45 | | 0 | c | 0 0 | | |
| 0 | c | 45 | bc | 45 | | 0 | c | 45 45 | bc | 45 ~3% |
| 0 | c | 85 | bc | 45 | | 0 | c | 85 85 | | |
| 0 | c | 95 | ac | 135 | | 0 | c | 95 95 | | |
| 0 | c | 135 | ac | 135 | | 0 | c | 135 135 | ac | 135 ~1% |
| 0 | c | 180 | ac | 135 | | 0 | c | 180 180 | | |
Scenario: Testbot - Initial bearing on split way Scenario: Testbot - Initial bearing on split way
Given the node map Given the node map
@ -38,23 +54,25 @@ Feature: Bearing parameter
| da | yes | | da | yes |
When I route I should get When I route I should get
| from | to | param:bearing | route | bearing | | from | to | bearings | route | bearing |
| 0 | b | 10 | ab | 90 | | 0 | b | 10 10 | bc | 0 |
| 0 | b | 90 | ab | 90 | | 0 | b | 90 90 | ab | 90 |
| 0 | b | 170 | ab | 90 | # The returned bearing is wrong here, it's based on the snapped
| 0 | b | 190 | cd,da,ab | 270 | # coordinates, not the acutal edge bearing. This should be
| 0 | b | 270 | cd,da,ab | 270 | # fixed one day, but it's only a problem when we snap too vias
| 0 | b | 350 | cd,da,ab | 270 | # to the same point - DP
| 1 | d | 10 | cd | 90 | #| 0 | b | 170 170 | da | 180 |
| 1 | d | 90 | cd | 90 | #| 0 | b | 189 189 | da | 180 |
| 1 | d | 170 | cd | 90 | | 0 | 1 | 90 270 | ab,bc,cd | 90,0,270 |
| 1 | d | 190 | ab,bc,cd | 270 | | 1 | d | 10 10 | bc | 0 |
| 1 | d | 270 | ab,bc,cd | 270 | | 1 | d | 90 90 | ab,bc,cd,da | 90,0,270,180 |
| 1 | d | 350 | ab,bc,cd | 270 | | 1 | 0 | 189 189 | da | 180 |
| 1 | d | 270 270 | cd | 270 |
| 1 | d | 349 349 | | |
Scenario: Testbot - Initial bearing in all direction Scenario: Testbot - Initial bearing in all direction
Given the node map Given the node map
| h | | | a | | | b | | h | | q | a | | | b |
| | | | | | | | | | | | | | | |
| | | p | i | j | | | | | | p | i | j | | |
| g | | o | 0 | k | | c | | g | | o | 0 | k | | c |
@ -82,12 +100,12 @@ Feature: Bearing parameter
| ha | yes | | ha | yes |
When I route I should get When I route I should get
| from | to | param:bearing | route | bearing | | from | to | bearings | route | bearing |
| 0 | a | 0 | ia | 0 | | 0 | q | 0 90 | ia,ab,bc,cd,de,ef,fg,gh,ha | 0,90,180,180,270,270,0,0,90 |
| 0 | a | 45 | jb,bc,cd,de,ef,fg,gh,ha | 45 | | 0 | a | 45 90 | jb,bc,cd,de,ef,fg,gh,ha | 45,180,180,270,270,0,0,90 |
| 0 | a | 90 | kc,cd,de,ef,fg,gh,ha | 90 | | 0 | q | 90 90 | kc,cd,de,ef,fg,gh,ha | 90,180,270,270,0,0,90 |
| 0 | a | 135 | ld,de,ef,fg,gh,ha | 135 | | 0 | a | 135 90 | ld,de,ef,fg,gh,ha | 135,270,270,0,0,90 |
| 0 | a | 180 | me,de,ef,fg,gh,ha | 180 | | 0 | a | 180 90 | me,ef,fg,gh,ha | 180,270,0,0,90 |
| 0 | a | 225 | nf,ef,fg,gh,ha | 225 | | 0 | a | 225 90 | nf,fg,gh,ha | 225,0,0,90 |
| 0 | a | 270 | og,gh,ha | 270 | | 0 | a | 270 90 | og,gh,ha | 270,0,90 |
| 0 | a | 315 | pn,ha | 315 | | 0 | a | 315 90 | ph,ha | 315,90 |

View File

@ -100,3 +100,82 @@ Feature: Basic Distance Matrix
| y | 500 | 0 | 300 | 200 | | y | 500 | 0 | 300 | 200 |
| d | 200 | 300 | 0 | 300 | | d | 200 | 300 | 0 | 300 |
| e | 300 | 400 | 100 | 0 | | e | 300 | 400 | 100 | 0 |
Scenario: Testbot - Travel time matrix and with only one source
Given the node map
| a | b | c |
| d | e | f |
And the ways
| nodes |
| abc |
| def |
| ad |
| be |
| cf |
When I request a travel time matrix I should get
| | a | b | e | f |
| a | 0 | 100 | 200 | 300 |
Scenario: Testbot - Travel time 3x2 matrix
Given the node map
| a | b | c |
| d | e | f |
And the ways
| nodes |
| abc |
| def |
| ad |
| be |
| cf |
When I request a travel time matrix I should get
| | b | e | f |
| a | 100 | 200 | 300 |
| b | 0 | 100 | 200 |
Scenario: Testbog - All coordinates are from same small component
Given a grid size of 300 meters
Given the extract extra arguments "--small-component-size 4"
Given the node map
| a | b | | f |
| d | e | | g |
And the ways
| nodes |
| ab |
| be |
| ed |
| da |
| fg |
When I request a travel time matrix I should get
| | f | g |
| f | 0 | 300 |
| g | 300 | 0 |
Scenario: Testbog - Coordinates are from different small component and snap to big CC
Given a grid size of 300 meters
Given the extract extra arguments "--small-component-size 4"
Given the node map
| a | b | | f | h |
| d | e | | g | i |
And the ways
| nodes |
| ab |
| be |
| ed |
| da |
| fg |
| hi |
When I request a travel time matrix I should get
| | f | g | h | i |
| f | 0 | 300 | 0 | 300 |
| g | 300 | 0 | 300 | 0 |
| h | 0 | 300 | 0 | 300 |
| i | 300 | 0 | 300 | 0 |

View File

@ -5,6 +5,8 @@ Feature: Turn directions/codes
Given the profile "testbot" Given the profile "testbot"
Scenario: Turn directions Scenario: Turn directions
Given the query options
| instructions | true |
Given the node map Given the node map
| o | p | a | b | c | | o | p | a | b | c |
| n | | | | d | | n | | | | d |
@ -31,7 +33,7 @@ Feature: Turn directions/codes
| xo | | xo |
| xp | | xp |
When I match with turns I should get When I match I should get
| trace | route | turns | matchings | | trace | route | turns | matchings |
| im | xi,xm | head,left,destination | im | | im | xi,xm | head,left,destination | im |
| io | xi,xo | head,slight_left,destination | io | | io | xi,xo | head,slight_left,destination | io |
@ -79,4 +81,41 @@ Feature: Turn directions/codes
| gm | xg,xm | head,slight_left,destination | gm | | gm | xg,xm | head,slight_left,destination | gm |
| go | xg,xo | head,straight,destination | go | | go | xg,xo | head,straight,destination | go |
| ga | xg,xa | head,slight_right,destination | ga | | ga | xg,xa | head,slight_right,destination | ga |
| gc | xg,xc | head,right,destination | gc | | gc | xg,xc | head,right,destination | gc |
Scenario: Turn directions
Given the query options
| instructions | true |
Given the node map
| o | p | a | b | c |
| n | | | | d |
| m | | x | | e |
| l | | | | f |
| k | j | i | h | g |
And the ways
| nodes |
| xa |
| xb |
| xc |
| xd |
| xe |
| xf |
| xg |
| xh |
| xi |
| xj |
| xk |
| xl |
| xm |
| xn |
| xo |
| xp |
When I match I should get
| trace | route | turns | matchings | duration |
| im | xi,xm | head,left,destination | im | 80 |
| io | xi,xo | head,slight_left,destination | io | 88 |
| ia | xi,xa | head,straight,destination | ia | 80 |
| ic | xi,xc | head,slight_right,destination | ic | 88 |
| ie | xi,xe | head,right,destination | ie | 60 |

Some files were not shown because too many files have changed in this diff Show More