Compare commits


No commits in common. "master" and "v0.4.1" have entirely different histories.

2625 changed files with 33541 additions and 591358 deletions

View File

@@ -1,7 +0,0 @@
{
"plugins": ["transform-class-properties"],
"presets": [
"@babel/preset-env",
"@babel/preset-react"
]
}

View File

@@ -2,89 +2,53 @@
Language: Cpp
# BasedOnStyle: LLVM
AccessModifierOffset: -2
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
ConstructorInitializerIndentWidth: 4
AlignEscapedNewlinesLeft: false
AlignOperands: true
AlignTrailingComments: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: All
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: false
AllowShortFunctionsOnASingleLine: true
AlwaysBreakTemplateDeclarations: false
BinPackArguments: false
BinPackParameters: false
BraceWrapping:
AfterClass: true
AfterControlStatement: true
AfterEnum: true
AfterFunction: true
AfterNamespace: true
AfterObjCDeclaration: true
AfterStruct: true
AfterUnion: true
BeforeCatch: true
BeforeElse: true
IndentBraces: true
AlwaysBreakBeforeMultilineStrings: false
BreakBeforeBinaryOperators: false
BreakBeforeBraces: Allman
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BinPackParameters: false
ColumnLimit: 100
CommentPragmas: '^ IWYU pragma:'
ConstructorInitializerAllOnOneLineOrOnePerLine: false
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
DerivePointerBinding: false
ExperimentalAutoDetectBinPacking: false
ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ]
IncludeCategories:
- Regex: '^<'
Priority: 3
- Regex: '^"(osrm|util|engine|extract|contract)/'
Priority: 2
- Regex: '.*'
Priority: 1
IndentCaseLabels: false
IndentWidth: 4
IndentWrappedFunctionNames: false
KeepEmptyLinesAtTheStartOfBlocks: true
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
KeepEmptyLinesAtTheStartOfBlocks: true
NamespaceIndentation: None
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakBeforeFirstCallParameter: 19
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000000
PenaltyBreakFirstLessLess: 120
PenaltyExcessCharacter: 1000
PenaltyReturnTypeOnItsOwnLine: 60
PointerAlignment: Right
ReflowComments: true
SortIncludes: true
SpaceAfterCStyleCast: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
PointerBindsToType: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Cpp11BracedListStyle: true
Standard: Cpp11
IndentWidth: 4
TabWidth: 8
UseTab: Never
BreakBeforeBraces: Allman
IndentFunctionDeclarationAfterType: false
SpacesInParentheses: false
SpacesInAngles: false
SpaceInEmptyParentheses: false
SpacesInCStyleCastParentheses: false
SpacesInContainerLiterals: true
SpaceBeforeAssignmentOperators: true
ContinuationIndentWidth: 4
CommentPragmas: '^ IWYU pragma:'
ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ]
SpaceBeforeParens: ControlStatements
...

View File

@@ -1,101 +0,0 @@
---
Checks: >
bugprone-*,
-bugprone-narrowing-conversions,
-bugprone-easily-swappable-parameters,
-bugprone-branch-clone,
-bugprone-misplaced-widening-cast,
-bugprone-exception-escape,
-bugprone-implicit-widening-of-multiplication-result,
-bugprone-integer-division,
-bugprone-reserved-identifier,
-bugprone-unhandled-self-assignment,
-bugprone-forward-declaration-namespace,
-bugprone-sizeof-expression,
-bugprone-throw-keyword-missing,
-bugprone-chained-comparison,
-bugprone-incorrect-enable-if,
-bugprone-switch-missing-default-case,
-bugprone-empty-catch,
-bugprone-unchecked-optional-access,
-clang-analyzer-*,
-clang-diagnostic-deprecated-declarations,
-clang-diagnostic-constant-conversion,
cppcoreguidelines-avoid-goto,
cppcoreguidelines-no-malloc,
cppcoreguidelines-virtual-class-destructor,
google-*,
-google-build-explicit-make-pair,
-google-build-using-namespace,
-google-explicit-constructor,
-google-default-arguments,
-google-readability-braces-around-statements,
-google-readability-casting,
-google-readability-namespace-comments,
-google-readability-function,
-google-readability-todo,
-google-runtime-int,
-google-build-namespaces,
-google-runtime-references,
-google-readability-function-size,
llvm-*,
-llvm-namespace-comment,
-llvm-qualified-auto,
-llvm-include-order,
-llvm-else-after-return,
-llvm-header-guard,
-llvm-twine-local,
misc-*,
-misc-argument-comment,
-misc-const-correctness,
-misc-non-private-member-variables-in-classes,
-misc-unconventional-assign-operator,
-misc-no-recursion,
-misc-misplaced-const,
-misc-definitions-in-headers,
-misc-unused-parameters,
-misc-include-cleaner,
modernize-concat-nested-namespaces,
modernize-use-using,
performance-*,
-performance-no-int-to-ptr,
-performance-enum-size,
-performance-avoid-endl,
readability-*,
-readability-avoid-const-params-in-decls,
-readability-braces-around-statements,
-readability-container-size-empty,
-readability-convert-member-functions-to-static,
-readability-const-return-type,
-readability-function-cognitive-complexity,
-readability-function-size,
-readability-identifier-naming,
-readability-implicit-bool-conversion,
-readability-magic-numbers,
-readability-else-after-return,
-readability-inconsistent-declaration-parameter-name,
-readability-isolate-declaration,
-readability-identifier-length,
-readability-redundant-declaration,
-readability-uppercase-literal-suffix,
-readability-named-parameter,
-readability-qualified-auto,
-readability-suspicious-call-argument,
-readability-redundant-access-specifiers,
-readability-redundant-member-init,
-readability-static-definition-in-anonymous-namespace,
-readability-use-anyofallof,
-readability-simplify-boolean-expr,
-readability-make-member-function-const,
-readability-redundant-string-init,
-readability-non-const-parameter,
-readability-redundant-inline-specifier,
-readability-avoid-nested-conditional-operator,
-readability-avoid-return-with-void-value,
-readability-redundant-casting,
-readability-static-accessed-through-instance
WarningsAsErrors: '*'
HeaderFilterRegex: '.*'

View File

@@ -1,14 +0,0 @@
# Kind-specific patterns to check AST nodes against. Both python-clang and
# libclang docs explain CursorKind, with differences in detail. See also:
# - https://github.com/llvm-mirror/clang/blob/aca4fe314a55cacae29e1548cb7bfd2119c6df4c/bindings/python/clang/cindex.py#L599
# - http://clang.llvm.org/doxygen/group__CINDEX.html#gaaccc432245b4cd9f2d470913f9ef0013
# - https://docs.python.org/2/library/re.html#regular-expression-syntax
class_decl: '^([A-Z]+[a-z]+)+$'
struct_decl: '^([A-Z]+[a-z]+)+$'
field_decl: '^[a-z_]+$'
var_decl: '^[a-z]+[a-z0-9_]*$'
parm_decl: '^[a-z]*[a-z0-9_]*$'
namespace: '^[a-z_]*$'
cxx_method: '^([A-Z]+[a-z]+)+$'
function_decl: '^[a-z]+([A-Z]+[a-z]+)*$'
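
As a hedged illustration of how such kind-specific patterns can be applied (this checker sketch is not part of the repository; only the regex strings are taken from the file above, the identifiers are hypothetical):

```cpp
// Minimal sketch: match sample identifiers against a few of the naming patterns above.
#include <cstddef>
#include <iostream>
#include <regex>
#include <string>
#include <utility>
#include <vector>

int main()
{
    // Pattern strings copied from the configuration above; the kinds mirror CursorKind names.
    const std::vector<std::pair<std::string, std::regex>> patterns = {
        {"class_decl", std::regex("^([A-Z]+[a-z]+)+$")},
        {"field_decl", std::regex("^[a-z_]+$")},
        {"function_decl", std::regex("^[a-z]+([A-Z]+[a-z]+)*$")}};

    // Hypothetical identifiers: a CamelCase class, a snake_case field, a camelBack function.
    const std::vector<std::string> names = {"RestrictionMap", "name_id", "getComponentSize"};

    for (std::size_t i = 0; i < patterns.size(); ++i)
    {
        const bool ok = std::regex_match(names[i], patterns[i].second);
        std::cout << patterns[i].first << ": " << names[i]
                  << (ok ? " matches\n" : " does not match\n");
    }
}
```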

View File

@@ -1,2 +0,0 @@
test
build

View File

@@ -1,30 +0,0 @@
# EditorConfig is awesome: http://EditorConfig.org
#
# NOTE: Keep settings in sync with the master .clang-format file
#
# top-most EditorConfig file
root = true
# CMake configuration files
[{CMakeLists.txt,CMakeSettings.json,*.cmake}]
indent_size = 2
indent_style = space
trim_trailing_whitespace = true
# CI configuration files
[{.travis.yml,appveyor.yml}]
indent_size = 2
indent_style = space
trim_trailing_whitespace = true
# Unix shell scripts
[*.sh]
end_of_line = lf
indent_style = space
trim_trailing_whitespace = true
# Windows shell scripts
[*.bat]
end_of_line = crlf
indent_style = space
trim_trailing_whitespace = true

View File

@@ -1,2 +0,0 @@
features/support/flatbuffers.js
features/support/fbresult_generated.js

View File

@@ -1,28 +0,0 @@
{
"rules": {
"indent": [
2,
4
],
"quotes": [
1,
"single"
],
"linebreak-style": [
2,
"unix"
],
"semi": [
2,
"always"
],
"no-console": [
1
]
},
"env": {
"es6": true,
"node": true
},
"extends": "eslint:recommended"
}

.gitattributes
View File

@@ -1,18 +0,0 @@
# Set the default behavior, in case people don't have core.autocrlf set.
* text=auto
# Explicitly declare text files you want to always be normalized and converted
# to native line endings on checkout.
*.cpp text
*.hpp text
# Declare files that will always have CRLF line endings on checkout.
*.bat text eol=crlf
*.cmd text eol=crlf
*.ps1 text eol=crlf
# Declare files that will always have LF line endings on checkout.
*.sh text eol=lf
# https://eslint.org/docs/latest/rules/linebreak-style#using-this-rule-with-version-control-systems
*.js text eol=lf

View File

@@ -1,65 +0,0 @@
name: Build and Publish Docker Image
on:
release:
types: [published, prereleased]
env:
IMAGE_NAME: openharbor/osrm-backend
jobs:
publish:
strategy:
matrix:
docker-base-image: ["debian", "alpine"]
runs-on: ubuntu-latest
steps:
- name: Check out the repo
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Docker meta
id: meta
uses: docker/metadata-action@v4
with:
images: ${{ env.IMAGE_NAME }}
- name: Docker meta - debug
id: metadebug
uses: docker/metadata-action@v4
with:
images: ${{ env.IMAGE_NAME }}
flavor: |
latest=true
suffix=-debug,onlatest=true
- name: Log in to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN }}
- name: Build and push debug image
uses: docker/build-push-action@v4
with:
push: true
platforms: linux/amd64,linux/arm64,linux/ppc64le,linux/riscv64
file: ./docker/Dockerfile-${{ matrix.docker-base-image }}
tags: ${{ steps.metadebug.outputs.tags }}
build-args: |
DOCKER_TAG=${{ join(steps.metadebug.outputs.tags) }}-${{ matrix.docker-base-image }}
- name: Build and push normal image
uses: docker/build-push-action@v4
with:
push: true
platforms: linux/amd64,linux/arm64,linux/ppc64le,linux/riscv64
file: ./docker/Dockerfile-${{ matrix.docker-base-image }}
tags: ${{ steps.meta.outputs.tags }}
build-args: |
DOCKER_TAG=${{ join(steps.meta.outputs.tags) }}-${{ matrix.docker-base-image }}

View File

@@ -1,30 +0,0 @@
---
name: Bug Report
about: Report issue with osrm-backend
labels: Bug Report
---
# Issue
Please describe the issue you are seeing with OSRM.
Images are a good way to illustrate your problem.
**Note**: If your issue relates to the demo site (https://map.project-osrm.org) or routing provided on openstreetmap.org, be aware that they use separate [profile settings](https://github.com/fossgis-routing-server/cbf-routing-profiles) from those provided by default in `osrm-backend`.
If your issue relates to the demo site or openstreetmap.org behaviour, please check these profiles first to see if they explain the behaviour before creating an issue here.
# Steps to reproduce
Please provide the steps required to reproduce your problem.
- `osrm-backend` version being used
- OSM extract that was processed
- Processing commands (e.g. CH vs MLD processing)
- Server queries
If you're reporting an issue with https://map.project-osrm.org, please provide a link to the problematic request.
# Specifications
Please provide details of your development environment.
- Library/dependency versions
- Operating system
- Hardware

View File

@@ -1,10 +0,0 @@
---
name: Feature Request
about: Request a new feature in osrm-backend
labels: Feature Request
---
# Feature
Please describe the feature you would like to see in OSRM.
Images are often a good way to illustrate your requested feature.

View File

@@ -1,19 +0,0 @@
# Issue
What issue is this PR targeting? If there is no issue that addresses the problem, please open a corresponding issue and link it here.
Please read our [documentation](https://github.com/Project-OSRM/osrm-backend/blob/master/docs/releasing.md) on release and version management.
If your PR is still a work in progress, please attach the relevant label.
## Tasklist
- [ ] CHANGELOG.md entry ([How to write a changelog entry](http://keepachangelog.com/en/1.0.0/#how))
- [ ] update relevant [Wiki pages](https://github.com/Project-OSRM/osrm-backend/wiki)
- [ ] add tests (see [testing documentation](https://github.com/Project-OSRM/osrm-backend/blob/master/docs/testing.md))
- [ ] review
- [ ] adjust for comments
- [ ] cherry pick to release branch
## Requirements / Relations
Link any requirements here, as well as any other pull requests this PR is based on.

View File

@@ -1,84 +0,0 @@
name: build and publish container image
on:
push:
tags:
- 'v*'
jobs:
publish:
strategy:
matrix:
docker-base-image: ["debian", "alpine"]
runs-on: ubuntu-latest
steps:
- name: Check out the repo
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Docker meta
id: meta
uses: docker/metadata-action@v3
with:
images: ghcr.io/${{ github.repository }}
- name: Docker meta - debug
id: metadebug
uses: docker/metadata-action@v3
with:
images: ghcr.io/${{ github.repository }}
flavor: |
latest=true
suffix=-debug,onlatest=true
- name: Docker meta - assertions
id: metaassertions
uses: docker/metadata-action@v3
with:
images: ghcr.io/${{ github.repository }}
flavor: |
latest=true
suffix=-assertions,onlatest=true
- name: Log in to GitHub Docker Registry
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build container image - debug
uses: docker/build-push-action@v2
with:
push: true
platforms: linux/amd64,linux/arm64
file: ./docker/Dockerfile-${{ matrix.docker-base-image }}
tags: ${{ steps.metadebug.outputs.tags }}
build-args: |
DOCKER_TAG=${{ join(steps.metadebug.outputs.tags ) }}-${{ matrix.docker-base-image }}
- name: Build container image - assertions
uses: docker/build-push-action@v2
with:
push: true
platforms: linux/amd64,linux/arm64
file: ./docker/Dockerfile-${{ matrix.docker-base-image }}
tags: ${{ steps.metaassertions.outputs.tags }}
build-args: |
DOCKER_TAG=${{ join(steps.metaassertions.outputs.tags ) }}-${{ matrix.docker-base-image }}
# build and publish the "normal" image last so that it gets listed on top
- name: Build container image - normal
uses: docker/build-push-action@v2
with:
push: true
platforms: linux/amd64,linux/arm64
file: ./docker/Dockerfile-${{ matrix.docker-base-image }}
tags: ${{ steps.meta.outputs.tags }}
build-args: |
DOCKER_TAG=${{ join(steps.meta.outputs.tags ) }}-${{ matrix.docker-base-image }}

View File

@@ -1,786 +0,0 @@
name: osrm-backend CI
on:
push:
branches:
- master
tags:
- v[1-9]+.[0-9]+.[0-9]+
- v[1-9]+.[0-9]+.[0-9]+-[a-zA-Z]+.[0-9]+
- v[1-9]+.[0-9]+-[0-9a-zA-Z]+
pull_request:
branches:
- master
env:
CCACHE_TEMPDIR: /tmp/.ccache-temp
CCACHE_COMPRESS: 1
CASHER_TIME_OUT: 599 # one second less than 10m to avoid 10m timeout error: https://github.com/Project-OSRM/osrm-backend/issues/2742
CMAKE_VERSION: 3.21.2
ENABLE_NODE_BINDINGS: "ON"
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
windows-release-node:
needs: format-taginfo-docs
runs-on: windows-2022
continue-on-error: false
env:
BUILD_TYPE: Release
steps:
- uses: actions/checkout@v4
- run: pip install "conan<2.0.0"
- run: conan --version
- run: cmake --version
- uses: actions/setup-node@v4
with:
node-version: 18
- run: node --version
- run: npm --version
- name: Prepare environment
shell: bash
run: |
PACKAGE_JSON_VERSION=$(node -e "console.log(require('./package.json').version)")
echo PUBLISH=$([[ "${GITHUB_REF:-}" == "refs/tags/v${PACKAGE_JSON_VERSION}" ]] && echo "On" || echo "Off") >> $GITHUB_ENV
- run: npm install --ignore-scripts
- run: npm link --ignore-scripts
- name: Build
shell: bash
run: |
mkdir build
cd build
cmake -DCMAKE_BUILD_TYPE=Release -DENABLE_CONAN=ON -DENABLE_NODE_BINDINGS=ON ..
cmake --build . --config Release
# TODO: MSVC runs out of memory when building our tests
# - name: Run tests
# shell: bash
# run: |
# cd build
# cmake --build . --config Release --target tests
# # TODO: run tests
# - name: Run node tests
# shell: bash
# run: |
# ./lib/binding/osrm-extract.exe -p profiles/car.lua test/data/monaco.osm.pbf
# mkdir -p test/data/ch
# cp test/data/monaco.osrm* test/data/ch/
# ./lib/binding/osrm-contract.exe test/data/ch/monaco.osrm
# ./lib/binding/osrm-datastore.exe test/data/ch/monaco.osrm
# node test/nodejs/index.js
- name: Build Node package
shell: bash
run: ./scripts/ci/node_package.sh
- name: Publish Node package
if: ${{ env.PUBLISH == 'On' }}
uses: ncipollo/release-action@v1
with:
allowUpdates: true
artifactErrorsFailBuild: true
artifacts: build/stage/**/*.tar.gz
omitBody: true
omitBodyDuringUpdate: true
omitName: true
omitNameDuringUpdate: true
replacesArtifacts: true
token: ${{ secrets.GITHUB_TOKEN }}
format-taginfo-docs:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- name: Use Node.js
uses: actions/setup-node@v4
with:
node-version: 18
- name: Enable Node.js cache
uses: actions/cache@v4
with:
path: ~/.npm
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node-
- name: Prepare environment
run: |
npm ci --ignore-scripts
clang-format-15 --version
- name: Run checks
run: |
./scripts/check_taginfo.py taginfo.json profiles/car.lua
./scripts/format.sh && ./scripts/error_on_dirty.sh
node ./scripts/validate_changelog.js
npm run docs && ./scripts/error_on_dirty.sh
npm audit --production
docker-image-matrix:
strategy:
matrix:
docker-base-image: ["debian", "alpine"]
needs: format-taginfo-docs
runs-on: ubuntu-22.04
continue-on-error: false
steps:
- name: Check out the repo
uses: actions/checkout@v4
- name: Enable osm.pbf cache
uses: actions/cache@v4
with:
path: berlin-latest.osm.pbf
key: v1-berlin-osm-pbf
restore-keys: |
v1-berlin-osm-pbf
- name: Docker build
run: |
docker build -t osrm-backend-local -f docker/Dockerfile-${{ matrix.docker-base-image }} .
- name: Test Docker image
run: |
if [ ! -f "${PWD}/berlin-latest.osm.pbf" ]; then
wget http://download.geofabrik.de/europe/germany/berlin-latest.osm.pbf
fi
TAG=osrm-backend-local
# when the `--memory-swap` value equals `--memory`, the container won't use swap
# see https://docs.docker.com/config/containers/resource_constraints/#--memory-swap-details
MEMORY_ARGS="--memory=1g --memory-swap=1g"
docker run $MEMORY_ARGS -t -v "${PWD}:/data" "${TAG}" osrm-extract --dump-nbg-graph -p /opt/car.lua /data/berlin-latest.osm.pbf
docker run $MEMORY_ARGS -t -v "${PWD}:/data" "${TAG}" osrm-components /data/berlin-latest.osrm.nbg /data/berlin-latest.geojson
if [ ! -s "${PWD}/berlin-latest.geojson" ]
then
>&2 echo "No berlin-latest.geojson found"
exit 1
fi
# remove `.osrm.nbg` to check that the whole pipeline works without it
rm -rf "${PWD}/berlin-latest.osrm.nbg"
docker run $MEMORY_ARGS -t -v "${PWD}:/data" "${TAG}" osrm-partition /data/berlin-latest.osrm
docker run $MEMORY_ARGS -t -v "${PWD}:/data" "${TAG}" osrm-customize /data/berlin-latest.osrm
docker run $MEMORY_ARGS --name=osrm-container -t -p 5000:5000 -v "${PWD}:/data" "${TAG}" osrm-routed --algorithm mld /data/berlin-latest.osrm &
curl --retry-delay 3 --retry 10 --retry-all-errors "http://127.0.0.1:5000/route/v1/driving/13.388860,52.517037;13.385983,52.496891?steps=true"
docker stop osrm-container
build-test-publish:
needs: format-taginfo-docs
strategy:
matrix:
include:
- name: gcc-13-debug-cov
continue-on-error: false
node: 20
runs-on: ubuntu-24.04
BUILD_TOOLS: ON
BUILD_TYPE: Debug
CCOMPILER: gcc-13
CUCUMBER_TIMEOUT: 20000
CXXCOMPILER: g++-13
ENABLE_COVERAGE: ON
- name: clang-18-debug-asan-ubsan
continue-on-error: false
node: 20
runs-on: ubuntu-24.04
BUILD_TOOLS: ON
BUILD_TYPE: Debug
CCOMPILER: clang-18
CUCUMBER_TIMEOUT: 20000
CXXCOMPILER: clang++-18
ENABLE_SANITIZER: ON
TARGET_ARCH: x86_64-asan-ubsan
OSRM_CONNECTION_RETRIES: 10
OSRM_CONNECTION_EXP_BACKOFF_COEF: 1.5
- name: clang-18-release
continue-on-error: false
node: 18
runs-on: ubuntu-24.04
BUILD_TOOLS: ON
BUILD_TYPE: Release
CCOMPILER: clang-18
CXXCOMPILER: clang++-18
CUCUMBER_TIMEOUT: 60000
ENABLE_LTO: OFF
- name: clang-18-debug
continue-on-error: false
node: 18
runs-on: ubuntu-24.04
BUILD_TOOLS: ON
BUILD_TYPE: Debug
CCOMPILER: clang-18
CXXCOMPILER: clang++-18
CUCUMBER_TIMEOUT: 60000
ENABLE_LTO: OFF
- name: clang-18-debug-clang-tidy
continue-on-error: false
node: 18
runs-on: ubuntu-24.04
BUILD_TOOLS: ON
BUILD_TYPE: Debug
CCOMPILER: clang-18
CXXCOMPILER: clang++-18
CUCUMBER_TIMEOUT: 60000
ENABLE_CLANG_TIDY: ON
- name: clang-17-release
continue-on-error: false
node: 18
runs-on: ubuntu-24.04
BUILD_TOOLS: ON
BUILD_TYPE: Release
CCOMPILER: clang-17
CXXCOMPILER: clang++-17
CUCUMBER_TIMEOUT: 60000
ENABLE_LTO: OFF
- name: clang-16-release
continue-on-error: false
node: 18
runs-on: ubuntu-24.04
BUILD_TOOLS: ON
BUILD_TYPE: Release
CCOMPILER: clang-16
CXXCOMPILER: clang++-16
CUCUMBER_TIMEOUT: 60000
ENABLE_LTO: OFF
- name: conan-linux-debug-asan-ubsan
continue-on-error: false
node: 18
runs-on: ubuntu-24.04
BUILD_TOOLS: ON
BUILD_TYPE: Release
CCOMPILER: clang-18
CXXCOMPILER: clang++-18
ENABLE_CONAN: ON
ENABLE_SANITIZER: ON
ENABLE_LTO: OFF
- name: conan-linux-release
continue-on-error: false
node: 18
runs-on: ubuntu-24.04
BUILD_TOOLS: ON
BUILD_TYPE: Release
CCOMPILER: clang-18
CXXCOMPILER: clang++-18
ENABLE_CONAN: ON
ENABLE_LTO: OFF
- name: gcc-14-release
continue-on-error: false
node: 20
runs-on: ubuntu-24.04
BUILD_TOOLS: ON
BUILD_TYPE: Release
CCOMPILER: gcc-14
CXXCOMPILER: g++-14
CXXFLAGS: '-Wno-array-bounds -Wno-uninitialized'
- name: gcc-13-release
continue-on-error: false
node: 20
runs-on: ubuntu-24.04
BUILD_TOOLS: ON
BUILD_TYPE: Release
CCOMPILER: gcc-13
CXXCOMPILER: g++-13
CXXFLAGS: '-Wno-array-bounds -Wno-uninitialized'
- name: gcc-12-release
continue-on-error: false
node: 20
runs-on: ubuntu-22.04
BUILD_TOOLS: ON
BUILD_TYPE: Release
CCOMPILER: gcc-12
CXXCOMPILER: g++-12
CXXFLAGS: '-Wno-array-bounds -Wno-uninitialized'
- name: conan-linux-release-node
build_node_package: true
continue-on-error: false
node: 20
runs-on: ubuntu-24.04
BUILD_TYPE: Release
CCOMPILER: clang-16
CXXCOMPILER: clang++-16
ENABLE_CONAN: ON
NODE_PACKAGE_TESTS_ONLY: ON
- name: conan-linux-debug-node
build_node_package: true
continue-on-error: false
node: 20
runs-on: ubuntu-24.04
BUILD_TYPE: Debug
CCOMPILER: clang-16
CXXCOMPILER: clang++-16
ENABLE_CONAN: ON
NODE_PACKAGE_TESTS_ONLY: ON
- name: conan-macos-x64-release-node
build_node_package: true
continue-on-error: true
node: 20
runs-on: macos-13 # x86_64
BUILD_TYPE: Release
CCOMPILER: clang
CXXCOMPILER: clang++
CUCUMBER_TIMEOUT: 60000
ENABLE_ASSERTIONS: ON
ENABLE_CONAN: ON
- name: conan-macos-arm64-release-node
build_node_package: true
continue-on-error: true
node: 20
runs-on: macos-14 # arm64
BUILD_TYPE: Release
CCOMPILER: clang
CXXCOMPILER: clang++
CUCUMBER_TIMEOUT: 60000
ENABLE_ASSERTIONS: ON
ENABLE_CONAN: ON
name: ${{ matrix.name}}
continue-on-error: ${{ matrix.continue-on-error }}
runs-on: ${{ matrix.runs-on }}
env:
BUILD_TOOLS: ${{ matrix.BUILD_TOOLS }}
BUILD_TYPE: ${{ matrix.BUILD_TYPE }}
BUILD_SHARED_LIBS: ${{ matrix.BUILD_SHARED_LIBS }}
CCOMPILER: ${{ matrix.CCOMPILER }}
CFLAGS: ${{ matrix.CFLAGS }}
CUCUMBER_TIMEOUT: ${{ matrix.CUCUMBER_TIMEOUT }}
CXXCOMPILER: ${{ matrix.CXXCOMPILER }}
CXXFLAGS: ${{ matrix.CXXFLAGS }}
ENABLE_ASSERTIONS: ${{ matrix.ENABLE_ASSERTIONS }}
ENABLE_CLANG_TIDY: ${{ matrix.ENABLE_CLANG_TIDY }}
ENABLE_COVERAGE: ${{ matrix.ENABLE_COVERAGE }}
ENABLE_CONAN: ${{ matrix.ENABLE_CONAN }}
ENABLE_SANITIZER: ${{ matrix.ENABLE_SANITIZER }}
NODE_PACKAGE_TESTS_ONLY: ${{ matrix.NODE_PACKAGE_TESTS_ONLY }}
TARGET_ARCH: ${{ matrix.TARGET_ARCH }}
OSRM_CONNECTION_RETRIES: ${{ matrix.OSRM_CONNECTION_RETRIES }}
OSRM_CONNECTION_EXP_BACKOFF_COEF: ${{ matrix.OSRM_CONNECTION_EXP_BACKOFF_COEF }}
ENABLE_LTO: ${{ matrix.ENABLE_LTO }}
steps:
- uses: actions/checkout@v4
- name: Build machine architecture
run: uname -m
- name: Use Node.js
uses: actions/setup-node@v4
with:
node-version: ${{ matrix.node }}
- name: Enable Node.js cache
uses: actions/cache@v4
with:
path: ~/.npm
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node-
- name: Enable compiler cache
uses: actions/cache@v4
with:
path: ~/.ccache
key: ccache-${{ matrix.name }}-${{ github.sha }}
restore-keys: |
ccache-${{ matrix.name }}-
- name: Enable Conan cache
uses: actions/cache@v4
with:
path: ~/.conan
key: v9-conan-${{ matrix.name }}-${{ github.sha }}
restore-keys: |
v9-conan-${{ matrix.name }}-
- name: Enable test cache
uses: actions/cache@v4
with:
path: ${{github.workspace}}/test/cache
key: v4-test-${{ matrix.name }}-${{ github.sha }}
restore-keys: |
v4-test-${{ matrix.name }}-
- name: Prepare environment
run: |
echo "CCACHE_DIR=$HOME/.ccache" >> $GITHUB_ENV
mkdir -p $HOME/.ccache
PACKAGE_JSON_VERSION=$(node -e "console.log(require('./package.json').version)")
echo PUBLISH=$([[ "${GITHUB_REF:-}" == "refs/tags/v${PACKAGE_JSON_VERSION}" ]] && echo "On" || echo "Off") >> $GITHUB_ENV
echo "OSRM_INSTALL_DIR=${GITHUB_WORKSPACE}/install-osrm" >> $GITHUB_ENV
echo "OSRM_BUILD_DIR=${GITHUB_WORKSPACE}/build-osrm" >> $GITHUB_ENV
if [[ "$ENABLE_SANITIZER" == 'ON' ]]; then
# We can only set this after checkout once we know the workspace directory
echo "LSAN_OPTIONS=print_suppressions=0:suppressions=${GITHUB_WORKSPACE}/scripts/ci/leaksanitizer.conf" >> $GITHUB_ENV
echo "UBSAN_OPTIONS=symbolize=1:halt_on_error=1:print_stacktrace=1:suppressions=${GITHUB_WORKSPACE}/scripts/ci/undefinedsanitizer.conf" >> $GITHUB_ENV
echo "ASAN_OPTIONS=print_suppressions=0:suppressions=${GITHUB_WORKSPACE}/scripts/ci/addresssanitizer.conf" >> $GITHUB_ENV
fi
if [[ "${RUNNER_OS}" == "Linux" ]]; then
echo "JOBS=$((`nproc` + 1))" >> $GITHUB_ENV
elif [[ "${RUNNER_OS}" == "macOS" ]]; then
echo "JOBS=$((`sysctl -n hw.ncpu` + 1))" >> $GITHUB_ENV
fi
# See: https://github.com/actions/toolkit/issues/946#issuecomment-1590016041
# We need it to be able to access system folders while restoring cached Boost below
- name: Give tar root ownership
if: runner.os == 'Linux' && matrix.ENABLE_CONAN != 'ON'
run: sudo chown root /bin/tar && sudo chmod u+s /bin/tar
- name: Cache Boost
if: runner.os == 'Linux' && matrix.ENABLE_CONAN != 'ON'
id: cache-boost
uses: actions/cache@v4
with:
path: |
/usr/local/include/boost
/usr/local/lib/libboost*
key: v1-boost-${{ runner.os }}-${{ runner.arch }}-${{ matrix.runs-on }}
restore-keys: |
v1-boost-${{ runner.os }}-${{ runner.arch }}-${{ matrix.runs-on }}
- name: Install Boost
if: steps.cache-boost.outputs.cache-hit != 'true' && runner.os == 'Linux' && matrix.ENABLE_CONAN != 'ON'
run: |
BOOST_VERSION="1.85.0"
BOOST_VERSION_FLAVOR="${BOOST_VERSION}-b2-nodocs"
wget -q https://github.com/boostorg/boost/releases/download/boost-${BOOST_VERSION}/boost-${BOOST_VERSION_FLAVOR}.tar.gz
tar xzf boost-${BOOST_VERSION_FLAVOR}.tar.gz
cd boost-${BOOST_VERSION}
sudo ./bootstrap.sh
sudo ./b2 install
cd ..
sudo rm -rf boost-${BOOST_VERSION}*
- name: Install dev dependencies
run: |
python3 -m pip install "conan<2.0.0" || python3 -m pip install "conan<2.0.0" --break-system-packages
# workaround for an issue where GitHub Actions does not add it to PATH after https://github.com/actions/runner-images/pull/6499
# and that's why CI cannot find the conan executable installed above
if [[ "${RUNNER_OS}" == "macOS" ]]; then
echo "/Library/Frameworks/Python.framework/Versions/Current/bin" >> $GITHUB_PATH
fi
# ccache
if [[ "${RUNNER_OS}" == "Linux" ]]; then
sudo apt-get update -y && sudo apt-get install ccache
elif [[ "${RUNNER_OS}" == "macOS" ]]; then
brew install ccache
fi
# Linux dev packages
if [ "${ENABLE_CONAN}" != "ON" ]; then
sudo apt-get update -y
sudo apt-get install -y libbz2-dev libxml2-dev libzip-dev liblua5.2-dev
if [[ "${CCOMPILER}" != clang-* ]]; then
sudo apt-get install -y ${CXXCOMPILER}
fi
if [[ "${ENABLE_COVERAGE}" == "ON" ]]; then
sudo apt-get install -y lcov
fi
fi
# TBB
TBB_VERSION=2021.12.0
if [[ "${RUNNER_OS}" == "Linux" ]]; then
TBB_URL="https://github.com/oneapi-src/oneTBB/releases/download/v${TBB_VERSION}/oneapi-tbb-${TBB_VERSION}-lin.tgz"
elif [[ "${RUNNER_OS}" == "macOS" ]]; then
TBB_URL="https://github.com/oneapi-src/oneTBB/releases/download/v${TBB_VERSION}/oneapi-tbb-${TBB_VERSION}-mac.tgz"
fi
wget --tries 5 ${TBB_URL} -O onetbb.tgz
tar zxvf onetbb.tgz
sudo cp -a oneapi-tbb-${TBB_VERSION}/lib/. /usr/local/lib/
sudo cp -a oneapi-tbb-${TBB_VERSION}/include/. /usr/local/include/
- name: Add Clang 18 to list of Conan compilers # workaround for the issue that Conan 1.x doesn't know about Clang 18
if: ${{ matrix.ENABLE_CONAN == 'ON' && matrix.CCOMPILER == 'clang-18' }}
run: |
sudo wget https://github.com/mikefarah/yq/releases/download/v4.9.6/yq_linux_amd64 -O /usr/bin/yq && sudo chmod +x /usr/bin/yq
conan config init
yq eval '.compiler.clang.version += ["18"]' -i "$HOME/.conan/settings.yml"
- name: Add Apple-clang 16 to list of Conan compilers # workaround for the issue that Conan 1.x doesn't know about Apple-clang 16
if: ${{ matrix.ENABLE_CONAN == 'ON' && matrix.runs-on == 'macos-14' }}
run: |
sudo wget https://github.com/mikefarah/yq/releases/download/v4.9.6/yq_darwin_arm64 -O /usr/local/bin/yq && sudo chmod +x /usr/local/bin/yq
conan config init
yq eval '.compiler.apple-clang.version += ["16.0"]' -i "$HOME/.conan/settings.yml"
- name: Prepare build
run: |
mkdir ${OSRM_BUILD_DIR}
ccache --max-size=256M
npm ci --ignore-scripts
if [[ "${ENABLE_COVERAGE}" == "ON" ]]; then
lcov --directory . --zerocounters # clean cached files
fi
echo "CC=${CCOMPILER}" >> $GITHUB_ENV
echo "CXX=${CXXCOMPILER}" >> $GITHUB_ENV
if [[ "${RUNNER_OS}" == "macOS" ]]; then
# missing from GCC path, needed for conan builds of libiconv, for example.
sudo xcode-select --switch /Library/Developer/CommandLineTools
echo "LIBRARY_PATH=${LIBRARY_PATH}:/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/lib" >> $GITHUB_ENV
echo "CPATH=${CPATH}:/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/include" >> $GITHUB_ENV
fi
- name: Build and install OSRM
run: |
echo "Using ${JOBS} jobs"
pushd ${OSRM_BUILD_DIR}
ccache --zero-stats
cmake .. -DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
-DENABLE_CONAN=${ENABLE_CONAN:-OFF} \
-DENABLE_ASSERTIONS=${ENABLE_ASSERTIONS:-OFF} \
-DENABLE_CLANG_TIDY=${ENABLE_CLANG_TIDY:-OFF} \
-DBUILD_SHARED_LIBS=${BUILD_SHARED_LIBS:-OFF} \
-DENABLE_COVERAGE=${ENABLE_COVERAGE:-OFF} \
-DENABLE_NODE_BINDINGS=${ENABLE_NODE_BINDINGS:-OFF} \
-DENABLE_SANITIZER=${ENABLE_SANITIZER:-OFF} \
-DBUILD_TOOLS=${BUILD_TOOLS:-OFF} \
-DENABLE_CCACHE=ON \
-DENABLE_LTO=${ENABLE_LTO:-ON} \
-DCMAKE_INSTALL_PREFIX=${OSRM_INSTALL_DIR}
make --jobs=${JOBS}
if [[ "${NODE_PACKAGE_TESTS_ONLY}" != "ON" ]]; then
make tests --jobs=${JOBS}
make benchmarks --jobs=${JOBS}
sudo make install
if [[ "${RUNNER_OS}" == "Linux" ]]; then
echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${OSRM_INSTALL_DIR}/lib" >> $GITHUB_ENV
fi
echo "PKG_CONFIG_PATH=${OSRM_INSTALL_DIR}/lib/pkgconfig" >> $GITHUB_ENV
fi
popd
- name: Build example
if: ${{ matrix.NODE_PACKAGE_TESTS_ONLY != 'ON' }}
run: |
mkdir example/build && pushd example/build
cmake .. -DCMAKE_BUILD_TYPE=${BUILD_TYPE}
make --jobs=${JOBS}
popd
- name: Run all tests
if: ${{ matrix.NODE_PACKAGE_TESTS_ONLY != 'ON' }}
run: |
make -C test/data benchmark
# macOS SIP strips the linker path. Reset this inside the running shell
export LD_LIBRARY_PATH=${{ env.LD_LIBRARY_PATH }}
./example/build/osrm-example test/data/mld/monaco.osrm
# All tests assume they are run from the build directory
pushd ${OSRM_BUILD_DIR}
for i in ./unit_tests/*-tests ; do echo Running $i ; $i ; done
if [ -z "${ENABLE_SANITIZER}" ]; then
npm run nodejs-tests
fi
popd
npm test
- name: Use Node 18
if: ${{ matrix.NODE_PACKAGE_TESTS_ONLY == 'ON' }}
uses: actions/setup-node@v4
with:
node-version: 18
- name: Run Node package tests on Node 18
if: ${{ matrix.NODE_PACKAGE_TESTS_ONLY == 'ON' }}
run: |
node --version
npm run nodejs-tests
- name: Use Node 20
if: ${{ matrix.NODE_PACKAGE_TESTS_ONLY == 'ON' }}
uses: actions/setup-node@v4
with:
node-version: 20
- name: Run Node package tests on Node 20
if: ${{ matrix.NODE_PACKAGE_TESTS_ONLY == 'ON' }}
run: |
node --version
npm run nodejs-tests
- name: Use Node latest
if: ${{ matrix.NODE_PACKAGE_TESTS_ONLY == 'ON' }}
uses: actions/setup-node@v4
with:
node-version: latest
- name: Run Node package tests on Node-latest
if: ${{ matrix.NODE_PACKAGE_TESTS_ONLY == 'ON' }}
run: |
node --version
npm run nodejs-tests
- name: Upload test logs
uses: actions/upload-artifact@v4
if: failure()
with:
name: logs
path: test/logs/
# - name: Generate code coverage
# if: ${{ matrix.ENABLE_COVERAGE == 'ON' }}
# run: |
# lcov --directory . --capture --output-file coverage.info # capture coverage info
# lcov --remove coverage.info '/usr/*' --output-file coverage.info # filter out system
# lcov --list coverage.info #debug info
# # Uploading report to CodeCov
# - name: Upload code coverage
# if: ${{ matrix.ENABLE_COVERAGE == 'ON' }}
# uses: codecov/codecov-action@v4
# with:
# files: coverage.info
# name: codecov-osrm-backend
# fail_ci_if_error: true
# verbose: true
- name: Build Node package
if: ${{ matrix.build_node_package }}
run: ./scripts/ci/node_package.sh
- name: Publish Node package
if: ${{ matrix.build_node_package && env.PUBLISH == 'On' }}
uses: ncipollo/release-action@v1
with:
allowUpdates: true
artifactErrorsFailBuild: true
artifacts: build/stage/**/*.tar.gz
omitBody: true
omitBodyDuringUpdate: true
omitName: true
omitNameDuringUpdate: true
replacesArtifacts: true
token: ${{ secrets.GITHUB_TOKEN }}
- name: Show CCache statistics
run: |
ccache -p
ccache -s
benchmarks:
if: github.event_name == 'pull_request'
needs: [format-taginfo-docs]
runs-on: self-hosted
env:
CCOMPILER: clang-16
CXXCOMPILER: clang++-16
CC: clang-16
CXX: clang++-16
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PR_NUMBER: ${{ github.event.pull_request.number }}
GITHUB_REPOSITORY: ${{ github.repository }}
RUN_BIG_BENCHMARK: ${{ contains(github.event.pull_request.labels.*.name, 'Performance') }}
steps:
- name: Checkout PR Branch
uses: actions/checkout@v4
with:
ref: ${{ github.head_ref }}
path: pr
- name: Activate virtualenv
run: |
python3 -m venv .venv
source .venv/bin/activate
echo PATH=$PATH >> $GITHUB_ENV
pip install "conan<2.0.0" "requests==2.31.0" "numpy==1.26.4"
- name: Prepare data
run: |
if [ "$RUN_BIG_BENCHMARK" = "true" ]; then
rm -rf ~/data.osm.pbf
wget http://download.geofabrik.de/europe/poland-latest.osm.pbf -O ~/data.osm.pbf --quiet
gunzip -c ./pr/test/data/poland_gps_traces.csv.gz > ~/gps_traces.csv
else
if [ ! -f "~/data.osm.pbf" ]; then
wget http://download.geofabrik.de/europe/germany/berlin-latest.osm.pbf -O ~/data.osm.pbf
else
echo "Using cached data.osm.pbf"
fi
gunzip -c ./pr/test/data/berlin_gps_traces.csv.gz > ~/gps_traces.csv
fi
- name: Prepare environment
run: |
echo "CCACHE_DIR=$HOME/.ccache" >> $GITHUB_ENV
mkdir -p $HOME/.ccache
ccache --zero-stats
ccache --max-size=256M
- name: Checkout Base Branch
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.base.ref }}
path: base
- name: Build Base Branch
run: |
cd base
npm ci --ignore-scripts
cd ..
mkdir base/build
cd base/build
cmake -DENABLE_CONAN=ON -DCMAKE_BUILD_TYPE=Release -DENABLE_NODE_BINDINGS=ON ..
make -j$(nproc)
make -j$(nproc) benchmarks
cd ..
make -C test/data
- name: Build PR Branch
run: |
cd pr
npm ci --ignore-scripts
cd ..
mkdir -p pr/build
cd pr/build
cmake -DENABLE_CONAN=ON -DCMAKE_BUILD_TYPE=Release -DENABLE_NODE_BINDINGS=ON ..
make -j$(nproc)
make -j$(nproc) benchmarks
cd ..
make -C test/data
# we run benchmarks in tmpfs to avoid impact of disk IO
- name: Create folder for tmpfs
run: |
# if by any chance it was mounted before (e.g. because a previous job failed), unmount it
sudo umount ~/benchmarks | true
rm -rf ~/benchmarks
mkdir -p ~/benchmarks
# see https://llvm.org/docs/Benchmarking.html
- name: Run PR Benchmarks
run: |
sudo cset shield -c 2-3 -k on
sudo mount -t tmpfs -o size=4g none ~/benchmarks
cp -rf pr/build ~/benchmarks/build
cp -rf pr/lib ~/benchmarks/lib
mkdir -p ~/benchmarks/test
cp -rf pr/test/data ~/benchmarks/test/data
cp -rf pr/profiles ~/benchmarks/profiles
sudo cset shield --exec -- ./pr/scripts/ci/run_benchmarks.sh -f ~/benchmarks -r $(pwd)/pr_results -s $(pwd)/pr -b ~/benchmarks/build -o ~/data.osm.pbf -g ~/gps_traces.csv
sudo umount ~/benchmarks
sudo cset shield --reset
- name: Run Base Benchmarks
run: |
sudo cset shield -c 2-3 -k on
sudo mount -t tmpfs -o size=4g none ~/benchmarks
cp -rf base/build ~/benchmarks/build
cp -rf base/lib ~/benchmarks/lib
mkdir -p ~/benchmarks/test
cp -rf base/test/data ~/benchmarks/test/data
cp -rf base/profiles ~/benchmarks/profiles
# TODO: remove this once the base branch has this file at the needed location
if [ ! -f ~/benchmarks/test/data/portugal_to_korea.json ]; then
cp base/src/benchmarks/portugal_to_korea.json ~/benchmarks/test/data/portugal_to_korea.json
fi
# we intentionally use scripts from PR branch to be able to update them and see results in the same PR
sudo cset shield --exec -- cset shield --exec -- ./pr/scripts/ci/run_benchmarks.sh -f ~/benchmarks -r $(pwd)/base_results -s $(pwd)/pr -b ~/benchmarks/build -o ~/data.osm.pbf -g ~/gps_traces.csv
sudo umount ~/benchmarks
sudo cset shield --reset
- name: Post Benchmark Results
run: |
python3 pr/scripts/ci/post_benchmark_results.py base_results pr_results
- name: Show CCache statistics
run: |
ccache -p
ccache -s
ci-complete:
runs-on: ubuntu-22.04
needs: [build-test-publish, docker-image-matrix, windows-release-node, benchmarks]
steps:
- run: echo "CI complete"

View File

@@ -1,29 +0,0 @@
name: 'Close stale issues'
on:
# NOTE: uncomment if you want to test changes to this file in PRs CI
# pull_request:
# branches:
# - master
schedule:
- cron: '30 1 * * *' # every day at 1:30am
permissions:
issues: write
pull-requests: write
jobs:
stale:
runs-on: ubuntu-24.04
steps:
- uses: actions/stale@v9
with:
operations-per-run: 3000
stale-issue-message: 'This issue seems to be stale. It will be closed in 30 days if no further activity occurs.'
stale-pr-message: 'This PR seems to be stale. Is it still relevant?'
days-before-issue-stale: 180 # 6 months
days-before-issue-close: 30 # 1 month
days-before-pr-stale: 180 # 6 months
days-before-pr-close: -1 # never close PRs
exempt-issue-labels: 'Do Not Stale,Feature Request,Performance,Bug Report,CI,Starter Task,Refactor,Guidance'

.gitignore
View File

@@ -1,9 +1,3 @@
# pre-compiled dependencies #
#############################
osrm-deps
.ycm_extra_conf.py
# Compiled source #
###################
*.com
@@ -41,13 +35,9 @@ Thumbs.db
# build related files #
#######################
/_build*
/build/
/example/build/
/test/data/monaco.osrm*
/test/data/ch
/test/data/mld
/cmake/postinst
/Util/UUID.cpp
/Util/GitDescription.cpp
# Eclipse related files #
#########################
@@ -56,19 +46,31 @@ Thumbs.db
.cproject
.project
# Visual Studio (Code) related files #
######################################
/.vs*
/*.local.bat
/CMakeSettings.json
/.cache
# stxxl related files #
#######################
.stxxl
stxxl.log
stxxl.errlog
# Jetbrains related files #
###########################
.idea/
# compiled protobuffers #
#########################
/DataStructures/pbf-proto/*.pb.h
/DataStructures/pbf-proto/*.pb.cc
# Compiled Binary Files #
# External Libs #
#################
/lib/
/win/lib
# Visual Studio Temp + build Files #
####################################
/win/*.user
/win/*.ncb
/win/*.suo
/win/Debug/
/win/Release/
/win/bin/
/win/bin-debug/
/osrm-extract
/osrm-io-benchmark
/osrm-components
@@ -77,31 +79,14 @@ Thumbs.db
/osrm-prepare
/osrm-unlock-all
/osrm-cli
/osrm-check-hsgr
/osrm-springclean
/nohup.out
# Sandbox folder #
###################
/sandbox/
# Test related files #
######################
/test/profile.lua
/test/cache
/test/speeds.csv
/test/penalties.csv
node_modules
# Deprecated config file #
##########################
/server.ini
*.swp
# local lua debugging file
debug.lua
# node-osrm artifacts
lib/binding

View File

@@ -1,16 +0,0 @@
*
!README.md
!CHANGELOG.md
!CONTRIBUTING.MD
!LICENCE.TXT
!package.json
!package-lock.json
!yarn.lock
!docs
!example
!taginfo.json
!lib/*.js
!profiles/*
!profiles/lib/*
!profiles/examples/*
!scripts/node_install.sh

.travis.yml
View File

@@ -0,0 +1,54 @@
language: cpp
compiler:
- gcc
# - clang
# Make sure CMake is installed
install:
- sudo apt-add-repository -y ppa:ubuntu-toolchain-r/test
- sudo add-apt-repository -y ppa:boost-latest/ppa
- sudo apt-get update >/dev/null
- sudo apt-get -q install libprotoc-dev libprotobuf7 libprotobuf-dev libosmpbf-dev libbz2-dev libstxxl-dev libstxxl1 libxml2-dev libzip-dev lua5.1 liblua5.1-0-dev rubygems
- sudo apt-get -q install g++-4.7
- sudo apt-get install libboost1.54-all-dev
#luabind
- curl https://gist.githubusercontent.com/DennisOSRM/f2eb7b948e6fe1ae319e/raw/install-luabind.sh | sudo bash
#osmosis
- curl -s https://gist.githubusercontent.com/DennisOSRM/803a64a9178ec375069f/raw/ | sudo bash
before_script:
- rvm use 1.9.3
- gem install bundler
- bundle install
- mkdir build
- cd build
- cmake .. $CMAKEOPTIONS
script:
- make -j 2
- cd ..
- cucumber -p verify
after_script:
# - cd ..
# - cucumber -p verify
branches:
only:
- master
- develop
cache:
- bundler
- apt
env:
- CMAKEOPTIONS="-DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=g++-4.7" OSRM_PORT=5000 OSRM_TIMEOUT=60
- CMAKEOPTIONS="-DCMAKE_BUILD_TYPE=Debug -DCMAKE_CXX_COMPILER=g++-4.7" OSRM_PORT=5010 OSRM_TIMEOUT=60
notifications:
irc:
channels:
- irc.oftc.net#osrm
on_success: change
on_failure: always
use_notice: true
skip_join: false
recipients:
- dennis@mapbox.com
email:
on_success: change
on_failure: always

View File

@@ -0,0 +1,140 @@
#ifndef __BFS_COMPONENT_EXPLORER_H__
#define __BFS_COMPONENT_EXPLORER_H__
#include "../typedefs.h"
#include "../DataStructures/DynamicGraph.h"
#include "../DataStructures/RestrictionMap.h"
#include <queue>
#include <unordered_set>
// Explores the components of the given graph while respecting turn restrictions
// and barriers.
template <typename GraphT> class BFSComponentExplorer
{
public:
BFSComponentExplorer(const GraphT &dynamicGraph,
const RestrictionMap &restrictions,
const std::unordered_set<NodeID> &barrier_nodes)
: m_graph(dynamicGraph), m_restriction_map(restrictions), m_barrier_nodes(barrier_nodes)
{
BOOST_ASSERT(m_graph.GetNumberOfNodes() > 0);
}
/*!
* Returns the size of the component that the node belongs to.
*/
inline unsigned int GetComponentSize(NodeID node)
{
BOOST_ASSERT(node < m_component_index_list.size());
return m_component_index_size[m_component_index_list[node]];
}
inline unsigned int GetNumberOfComponents() { return m_component_index_size.size(); }
/*!
* Computes the component sizes.
*/
void run()
{
std::queue<std::pair<NodeID, NodeID>> bfs_queue;
unsigned current_component = 0;
BOOST_ASSERT(m_component_index_list.empty());
BOOST_ASSERT(m_component_index_size.empty());
unsigned num_nodes = m_graph.GetNumberOfNodes();
m_component_index_list.resize(num_nodes, std::numeric_limits<unsigned>::max());
BOOST_ASSERT(num_nodes > 0);
// put unexplored node with parent pointer into queue
for (NodeID node = 0; node < num_nodes; ++node)
{
if (std::numeric_limits<unsigned>::max() == m_component_index_list[node])
{
unsigned size = ExploreComponent(bfs_queue, node, current_component);
// push size into vector
m_component_index_size.emplace_back(size);
++current_component;
}
}
}
private:
/*!
* Explores the current component that starts at node using BFS.
*/
inline unsigned ExploreComponent(std::queue<std::pair<NodeID, NodeID>> &bfs_queue,
NodeID node,
unsigned current_component)
{
bfs_queue.emplace(node, node);
// mark node as read
m_component_index_list[node] = current_component;
unsigned current_component_size = 1;
while (!bfs_queue.empty())
{
// fetch element from BFS queue
std::pair<NodeID, NodeID> current_queue_item = bfs_queue.front();
bfs_queue.pop();
const NodeID v = current_queue_item.first; // current node
const NodeID u = current_queue_item.second; // parent
// increment size counter of current component
++current_component_size;
const bool is_barrier_node = (m_barrier_nodes.find(v) != m_barrier_nodes.end());
if (!is_barrier_node)
{
const NodeID to_node_of_only_restriction =
m_restriction_map.CheckForEmanatingIsOnlyTurn(u, v);
for (auto e2 : m_graph.GetAdjacentEdgeRange(v))
{
const NodeID w = m_graph.GetTarget(e2);
if (to_node_of_only_restriction != std::numeric_limits<unsigned>::max() &&
w != to_node_of_only_restriction)
{
// At an only_-restriction but not at the right turn
continue;
}
if (u != w)
{
// only add an edge if turn is not a U-turn except
// when it is at the end of a dead-end street.
if (!m_restriction_map.CheckIfTurnIsRestricted(u, v, w))
{
// only add an edge if turn is not prohibited
if (std::numeric_limits<unsigned>::max() == m_component_index_list[w])
{
// insert next (node, parent) only if w has
// not yet been explored
// mark node as read
m_component_index_list[w] = current_component;
bfs_queue.emplace(w, v);
}
}
}
}
}
}
return current_component_size;
}
std::vector<unsigned> m_component_index_list;
std::vector<NodeID> m_component_index_size;
const GraphT &m_graph;
const RestrictionMap &m_restriction_map;
const std::unordered_set<NodeID> &m_barrier_nodes;
};
#endif
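
For orientation, the following self-contained sketch (an illustration, not repository code) shows the plain BFS component labelling that the explorer above builds on; the turn-restriction and barrier handling is deliberately omitted:

```cpp
// Plain BFS component labelling over an adjacency list; the restriction/barrier
// handling of BFSComponentExplorer is intentionally left out.
#include <iostream>
#include <limits>
#include <queue>
#include <vector>

int main()
{
    // Undirected toy graph: nodes 0-2 form one component, nodes 3-4 another.
    const std::vector<std::vector<unsigned>> adjacency = {{1}, {0, 2}, {1}, {4}, {3}};
    const unsigned unset = std::numeric_limits<unsigned>::max();
    std::vector<unsigned> component(adjacency.size(), unset);
    std::vector<unsigned> component_size;

    for (unsigned start = 0; start < adjacency.size(); ++start)
    {
        if (component[start] != unset)
        {
            continue;
        }
        const unsigned current = component_size.size();
        unsigned size = 0;
        std::queue<unsigned> bfs_queue;
        bfs_queue.push(start);
        component[start] = current;
        while (!bfs_queue.empty())
        {
            const unsigned node = bfs_queue.front();
            bfs_queue.pop();
            ++size;
            for (const unsigned neighbour : adjacency[node])
            {
                if (component[neighbour] == unset)
                {
                    component[neighbour] = current;
                    bfs_queue.push(neighbour);
                }
            }
        }
        component_size.push_back(size);
    }

    for (unsigned node = 0; node < adjacency.size(); ++node)
    {
        std::cout << "node " << node << " -> component " << component[node] << " (size "
                  << component_size[component[node]] << ")\n";
    }
}
```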

View File

@@ -0,0 +1,140 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <osrm/Coordinate.h>
#include "DouglasPeucker.h"
#include "../DataStructures/SegmentInformation.h"
#include "../Util/SimpleLogger.h"
#include <boost/assert.hpp>
#include <cmath>
#include <limits>
DouglasPeucker::DouglasPeucker()
: douglas_peucker_thresholds({262144., // z0
131072., // z1
65536., // z2
32768., // z3
16384., // z4
8192., // z5
4096., // z6
2048., // z7
960., // z8
480., // z9
240., // z10
90., // z11
50., // z12
25., // z13
15., // z14
5., // z15
.65, // z16
.5, // z17
.35 // z18
})
{
}
void DouglasPeucker::Run(std::vector<SegmentInformation> &input_geometry, const unsigned zoom_level)
{
input_geometry.front().necessary = true;
input_geometry.back().necessary = true;
BOOST_ASSERT_MSG(!input_geometry.empty(), "geometry invalid");
if (input_geometry.size() < 2)
{
return;
}
SimpleLogger().Write() << "input_geometry.size()=" << input_geometry.size();
{
BOOST_ASSERT_MSG(zoom_level < 19, "unsupported zoom level");
unsigned left_border = 0;
unsigned right_border = 1;
// Sweep over array and identify those ranges that need to be checked
do
{
if (!input_geometry[left_border].necessary)
{
SimpleLogger().Write() << "broken interval [" << left_border << "," << right_border << "]";
}
BOOST_ASSERT_MSG(input_geometry[left_border].necessary,
"left border must be necessary");
BOOST_ASSERT_MSG(input_geometry.back().necessary, "right border must be necessary");
if (input_geometry[right_border].necessary)
{
recursion_stack.emplace(left_border, right_border);
left_border = right_border;
}
++right_border;
} while (right_border < input_geometry.size());
}
while (!recursion_stack.empty())
{
// pop next element
const GeometryRange pair = recursion_stack.top();
recursion_stack.pop();
BOOST_ASSERT_MSG(input_geometry[pair.first].necessary, "left border must be necessary");
BOOST_ASSERT_MSG(input_geometry[pair.second].necessary, "right border must be necessary");
BOOST_ASSERT_MSG(pair.second < input_geometry.size(), "right border outside of geometry");
BOOST_ASSERT_MSG(pair.first < pair.second, "left border on the wrong side");
double max_distance = std::numeric_limits<double>::min();
unsigned farthest_element_index = pair.second;
// find index idx of element with max_distance
for (unsigned i = pair.first + 1; i < pair.second; ++i)
{
const double temp_dist = FixedPointCoordinate::ComputePerpendicularDistance(
input_geometry[i].location,
input_geometry[pair.first].location,
input_geometry[pair.second].location);
const double distance = std::abs(temp_dist);
if (distance > douglas_peucker_thresholds[zoom_level] && distance > max_distance)
{
farthest_element_index = i;
max_distance = distance;
}
}
if (max_distance > douglas_peucker_thresholds[zoom_level])
{
// mark idx as necessary
input_geometry[farthest_element_index].necessary = true;
if (1 < (farthest_element_index - pair.first))
{
recursion_stack.emplace(pair.first, farthest_element_index);
}
if (1 < (pair.second - farthest_element_index))
{
recursion_stack.emplace(farthest_element_index, pair.second);
}
}
}
}

View File

@@ -0,0 +1,62 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef DOUGLASPEUCKER_H_
#define DOUGLASPEUCKER_H_
#include <stack>
#include <vector>
/* This class computes the bitvector indicating the generalized input
* points according to the (Ramer-)Douglas-Peucker algorithm.
*
* Input is a vector of pairs. Each pair consists of the point information and a
* bit indicating whether the point is present in the generalization.
* Note: points may also be pre-selected. */
struct FixedPointCoordinate;
struct SegmentInformation;
class DouglasPeucker
{
private:
std::vector<double> douglas_peucker_thresholds;
typedef std::pair<unsigned, unsigned> GeometryRange;
// Stack to simulate the recursion
std::stack<GeometryRange> recursion_stack;
double ComputeDistance(const FixedPointCoordinate &point,
const FixedPointCoordinate &segA,
const FixedPointCoordinate &segB) const;
public:
DouglasPeucker();
void Run(std::vector<SegmentInformation> &input_geometry, const unsigned zoom_level);
};
#endif /* DOUGLASPEUCKER_H_ */
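
As a rough, hedged companion to the class above, here is a generic Ramer-Douglas-Peucker pass over plain 2D points using Euclidean perpendicular distance; it is an illustration only and does not use the project's SegmentInformation or FixedPointCoordinate types:

```cpp
// Generic Ramer-Douglas-Peucker simplification on 2D points (illustration only).
#include <cmath>
#include <cstddef>
#include <iostream>
#include <stack>
#include <utility>
#include <vector>

struct Point
{
    double x;
    double y;
};

// Perpendicular distance of p from the line through a and b.
static double PerpendicularDistance(const Point &p, const Point &a, const Point &b)
{
    const double dx = b.x - a.x;
    const double dy = b.y - a.y;
    const double length = std::hypot(dx, dy);
    if (length == 0.)
    {
        return std::hypot(p.x - a.x, p.y - a.y);
    }
    return std::abs(dy * p.x - dx * p.y + b.x * a.y - b.y * a.x) / length;
}

int main()
{
    const std::vector<Point> input = {
        {0, 0}, {1, 0.1}, {2, -0.1}, {3, 5}, {4, 6}, {5, 7}, {6, 8.1}, {7, 9}};
    const double threshold = 1.0; // analogous to one entry of douglas_peucker_thresholds
    std::vector<bool> keep(input.size(), false);
    keep.front() = true;
    keep.back() = true;

    // Iterative recursion over index ranges, mirroring the recursion_stack above.
    std::stack<std::pair<std::size_t, std::size_t>> ranges;
    ranges.emplace(0, input.size() - 1);
    while (!ranges.empty())
    {
        const auto [left, right] = ranges.top();
        ranges.pop();
        double max_distance = 0.;
        std::size_t farthest = left;
        for (std::size_t i = left + 1; i < right; ++i)
        {
            const double distance = PerpendicularDistance(input[i], input[left], input[right]);
            if (distance > max_distance)
            {
                max_distance = distance;
                farthest = i;
            }
        }
        if (max_distance > threshold)
        {
            keep[farthest] = true;
            ranges.emplace(left, farthest);
            ranges.emplace(farthest, right);
        }
    }

    for (std::size_t i = 0; i < input.size(); ++i)
    {
        std::cout << (keep[i] ? "keep" : "drop") << " (" << input[i].x << ", " << input[i].y << ")\n";
    }
}
```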

View File

@@ -0,0 +1,142 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef EXTRACT_ROUTE_NAMES_H
#define EXTRACT_ROUTE_NAMES_H
#include <boost/assert.hpp>
#include <algorithm>
#include <string>
#include <vector>
struct RouteNames
{
std::string shortest_path_name_1;
std::string shortest_path_name_2;
std::string alternative_path_name_1;
std::string alternative_path_name_2;
};
// construct routes names
template <class DataFacadeT, class SegmentT> struct ExtractRouteNames
{
RouteNames operator()(std::vector<SegmentT> &shortest_path_segments,
std::vector<SegmentT> &alternative_path_segments,
const DataFacadeT *facade)
{
RouteNames route_names;
SegmentT shortest_segment_1, shortest_segment_2;
SegmentT alternative_segment_1, alternative_segment_2;
auto length_comperator = [](SegmentT a, SegmentT b)
{ return a.length > b.length; };
auto name_id_comperator = [](SegmentT a, SegmentT b)
{ return a.name_id < b.name_id; };
if (shortest_path_segments.empty())
{
return route_names;
}
std::sort(shortest_path_segments.begin(), shortest_path_segments.end(), length_comperator);
shortest_segment_1 = shortest_path_segments[0];
if (!alternative_path_segments.empty())
{
std::sort(alternative_path_segments.begin(),
alternative_path_segments.end(),
length_comperator);
alternative_segment_1 = alternative_path_segments[0];
}
std::vector<SegmentT> shortest_path_set_difference(shortest_path_segments.size());
std::vector<SegmentT> alternative_path_set_difference(alternative_path_segments.size());
std::set_difference(shortest_path_segments.begin(),
shortest_path_segments.end(),
alternative_path_segments.begin(),
alternative_path_segments.end(),
shortest_path_set_difference.begin(),
name_id_comperator);
int size_of_difference = shortest_path_set_difference.size();
if (size_of_difference)
{
int i = 0;
while (i < size_of_difference &&
shortest_path_set_difference[i].name_id == shortest_path_segments[0].name_id)
{
++i;
}
if (i < size_of_difference)
{
shortest_segment_2 = shortest_path_set_difference[i];
}
}
std::set_difference(alternative_path_segments.begin(),
alternative_path_segments.end(),
shortest_path_segments.begin(),
shortest_path_segments.end(),
alternative_path_set_difference.begin(),
name_id_comperator);
size_of_difference = alternative_path_set_difference.size();
if (size_of_difference)
{
int i = 0;
while (i < size_of_difference &&
alternative_path_set_difference[i].name_id ==
alternative_path_segments[0].name_id)
{
++i;
}
if (i < size_of_difference)
{
alternative_segment_2 = alternative_path_set_difference[i];
}
}
if (shortest_segment_1.position > shortest_segment_2.position)
{
std::swap(shortest_segment_1, shortest_segment_2);
}
if (alternative_segment_1.position > alternative_segment_2.position)
{
std::swap(alternative_segment_1, alternative_segment_2);
}
route_names.shortest_path_name_1 =
facade->GetEscapedNameForNameID(shortest_segment_1.name_id);
route_names.shortest_path_name_2 =
facade->GetEscapedNameForNameID(shortest_segment_2.name_id);
route_names.alternative_path_name_1 =
facade->GetEscapedNameForNameID(alternative_segment_1.name_id);
route_names.alternative_path_name_2 =
facade->GetEscapedNameForNameID(alternative_segment_2.name_id);
return route_names;
}
};
#endif // EXTRACT_ROUTE_NAMES_H

View File

@@ -0,0 +1,144 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef ITERATOR_BASED_CRC32_H
#define ITERATOR_BASED_CRC32_H
#include "../Util/SimpleLogger.h"
#include <iostream>
#if defined(__x86_64__)
#include <cpuid.h>
#else
#include <boost/crc.hpp> // for boost::crc_32_type
inline void __get_cpuid(int param, unsigned *eax, unsigned *ebx, unsigned *ecx, unsigned *edx)
{
*ecx = 0;
}
#endif
template <class ContainerT> class IteratorbasedCRC32
{
private:
typedef typename ContainerT::iterator IteratorType;
unsigned crc;
bool use_SSE42_CRC_function;
#if !defined(__x86_64__)
boost::crc_optimal<32, 0x1EDC6F41, 0x0, 0x0, true, true> CRC32_processor;
#endif
unsigned SoftwareBasedCRC32(char *str, unsigned len)
{
#if !defined(__x86_64__)
CRC32_processor.process_bytes(str, len);
return CRC32_processor.checksum();
#else
return 0;
#endif
}
// adapted from http://byteworm.com/2010/10/13/crc32/
unsigned SSE42BasedCRC32(char *str, unsigned len)
{
#if defined(__x86_64__)
unsigned q = len / sizeof(unsigned);
unsigned r = len % sizeof(unsigned);
unsigned *p = (unsigned *)str;
// crc=0;
while (q--)
{
__asm__ __volatile__(".byte 0xf2, 0xf, 0x38, 0xf1, 0xf1;"
: "=S"(crc)
: "0"(crc), "c"(*p));
++p;
}
str = (char *)p;
while (r--)
{
__asm__ __volatile__(".byte 0xf2, 0xf, 0x38, 0xf1, 0xf1;"
: "=S"(crc)
: "0"(crc), "c"(*str));
++str;
}
#endif
return crc;
}
inline unsigned cpuid() const
{
unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
// on x86-64 this calls the hardware cpuid instruction; otherwise the dummy implementation above is used
__get_cpuid(1, &eax, &ebx, &ecx, &edx);
return ecx;
}
bool DetectNativeCRC32Support()
{
static const int SSE42_BIT = 0x00100000;
const unsigned ecx = cpuid();
const bool has_SSE42 = ecx & SSE42_BIT;
if (has_SSE42)
{
SimpleLogger().Write() << "using hardware based CRC32 computation";
}
else
{
SimpleLogger().Write() << "using software based CRC32 computation";
}
return has_SSE42;
}
public:
IteratorbasedCRC32() : crc(0) { use_SSE42_CRC_function = DetectNativeCRC32Support(); }
unsigned operator()(IteratorType iter, const IteratorType end)
{
unsigned crc = 0;
while (iter != end)
{
char *data = reinterpret_cast<char *>(&(*iter));
if (use_SSE42_CRC_function)
{
crc = SSE42BasedCRC32(data, sizeof(typename ContainerT::value_type));
}
else
{
crc = SoftwareBasedCRC32(data, sizeof(typename ContainerT::value_type));
}
++iter;
}
return crc;
}
};
#endif /* ITERATOR_BASED_CRC32_H */
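A short usage sketch for the class above, assuming the header (and its SimpleLogger dependency) is available; whether the SSE4.2 instruction or the Boost fallback runs is decided once in the constructor via DetectNativeCRC32Support():
#include <iostream>
#include <vector>
// plus the IteratorbasedCRC32 definition from the header above
int main()
{
    std::vector<unsigned> node_ids;
    for (unsigned i = 0; i < 1000; ++i)
    {
        node_ids.push_back(i * i);
    }
    IteratorbasedCRC32<std::vector<unsigned>> crc32_calculator;
    const unsigned checksum = crc32_calculator(node_ids.begin(), node_ids.end());
    std::cout << "checksum: " << checksum << std::endl;
    return 0;
}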

View File

@ -0,0 +1,99 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef OBJECTTOBASE64_H_
#define OBJECTTOBASE64_H_
#include "../Util/StringUtil.h"
#include <boost/assert.hpp>
#include <boost/archive/iterators/base64_from_binary.hpp>
#include <boost/archive/iterators/binary_from_base64.hpp>
#include <boost/archive/iterators/transform_width.hpp>
#include <algorithm>
#include <string>
#include <vector>
typedef
boost::archive::iterators::base64_from_binary<
boost::archive::iterators::transform_width<const char *, 6, 8>
> base64_t;
typedef
boost::archive::iterators::transform_width<
boost::archive::iterators::binary_from_base64<
std::string::const_iterator>, 8, 6
> binary_t;
template<class ObjectT>
static void EncodeObjectToBase64(const ObjectT & object, std::string& encoded) {
const char * char_ptr_to_object = (const char *)&object;
std::vector<unsigned char> data(sizeof(object));
std::copy(
char_ptr_to_object,
char_ptr_to_object + sizeof(ObjectT),
data.begin()
);
unsigned char number_of_padded_chars = 0; // is in {0,1,2};
while(data.size() % 3 != 0) {
++number_of_padded_chars;
data.push_back(0x00);
}
BOOST_ASSERT_MSG(
0 == data.size() % 3,
"base64 input data size is not a multiple of 3!"
);
encoded.resize(sizeof(ObjectT));
encoded.assign(
base64_t( &data[0] ),
base64_t( &data[0] + (data.size() - number_of_padded_chars) )
);
replaceAll(encoded, "+", "-");
replaceAll(encoded, "/", "_");
}
template<class ObjectT>
static void DecodeObjectFromBase64(const std::string& input, ObjectT & object) {
try {
std::string encoded(input);
//replace "-" with "+" and "_" with "/"
replaceAll(encoded, "-", "+");
replaceAll(encoded, "_", "/");
std::copy (
binary_t( encoded.begin() ),
binary_t( encoded.begin() + encoded.length() - 1),
(char *)&object
);
} catch(...) { }
}
#endif /* OBJECTTOBASE64_H_ */
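A round-trip sketch for the two helpers above; ViaPoint is a made-up trivially copyable struct, and the helpers are only meaningful for such plain-old-data types because they copy the raw object bytes. Assumes the header (and the replaceAll helper from StringUtil.h) is available:
#include <iostream>
#include <string>
// plus EncodeObjectToBase64/DecodeObjectFromBase64 from the header above
struct ViaPoint
{
    int lat;
    int lon;
};
int main()
{
    ViaPoint input = {52500000, 13400000};
    std::string encoded;
    EncodeObjectToBase64(input, encoded);

    ViaPoint output = {0, 0};
    DecodeObjectFromBase64(encoded, output);
    std::cout << encoded << " -> " << output.lat << "," << output.lon << std::endl;
    return 0;
}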

View File

@ -0,0 +1,151 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "PolylineCompressor.h"
#include "../Util/StringUtil.h"
//TODO: return vector of start indices for each leg
void PolylineCompressor::encodeVectorSignedNumber(std::vector<int> &numbers, std::string &output)
const
{
const unsigned end = numbers.size();
for (unsigned i = 0; i < end; ++i)
{
numbers[i] <<= 1;
if (numbers[i] < 0)
{
numbers[i] = ~(numbers[i]);
}
}
for (const int number: numbers)
{
encodeNumber(number, output);
}
}
void PolylineCompressor::encodeNumber(int number_to_encode, std::string &output) const
{
while (number_to_encode >= 0x20)
{
int next_value = (0x20 | (number_to_encode & 0x1f)) + 63;
output += static_cast<char>(next_value);
if (92 == next_value)
{
output += static_cast<char>(next_value);
}
number_to_encode >>= 5;
}
number_to_encode += 63;
output += static_cast<char>(number_to_encode);
if (92 == number_to_encode)
{
output += static_cast<char>(number_to_encode);
}
}
JSON::String PolylineCompressor::printEncodedString(const std::vector<SegmentInformation> &polyline) const
{
std::string output;
std::vector<int> delta_numbers;
if (!polyline.empty())
{
FixedPointCoordinate last_coordinate = polyline[0].location;
delta_numbers.emplace_back(last_coordinate.lat);
delta_numbers.emplace_back(last_coordinate.lon);
for (const auto & segment : polyline)
{
if (segment.necessary)
{
int lat_diff = segment.location.lat - last_coordinate.lat;
int lon_diff = segment.location.lon - last_coordinate.lon;
delta_numbers.emplace_back(lat_diff);
delta_numbers.emplace_back(lon_diff);
last_coordinate = segment.location;
}
}
encodeVectorSignedNumber(delta_numbers, output);
}
JSON::String return_value(output);
return return_value;
}
JSON::String PolylineCompressor::printEncodedString(const std::vector<FixedPointCoordinate> &polyline) const
{
std::string output;
std::vector<int> delta_numbers(2 * polyline.size());
if (!polyline.empty())
{
delta_numbers[0] = polyline[0].lat;
delta_numbers[1] = polyline[0].lon;
for (unsigned i = 1; i < polyline.size(); ++i)
{
int lat_diff = polyline[i].lat - polyline[i - 1].lat;
int lon_diff = polyline[i].lon - polyline[i - 1].lon;
delta_numbers[(2 * i)] = (lat_diff);
delta_numbers[(2 * i) + 1] = (lon_diff);
}
encodeVectorSignedNumber(delta_numbers, output);
}
JSON::String return_value(output);
return return_value;
}
JSON::Array PolylineCompressor::printUnencodedString(const std::vector<FixedPointCoordinate> &polyline) const
{
JSON::Array json_geometry_array;
for( const auto & coordinate : polyline)
{
std::string tmp, output;
FixedPointCoordinate::convertInternalLatLonToString(coordinate.lat, tmp);
output += (tmp + ",");
FixedPointCoordinate::convertInternalLatLonToString(coordinate.lon, tmp);
output += tmp;
json_geometry_array.values.push_back(output);
}
return json_geometry_array;
}
JSON::Array PolylineCompressor::printUnencodedString(const std::vector<SegmentInformation> &polyline) const
{
JSON::Array json_geometry_array;
for( const auto & segment : polyline)
{
if (segment.necessary)
{
std::string tmp, output;
FixedPointCoordinate::convertInternalLatLonToString(segment.location.lat, tmp);
output += (tmp + ",");
FixedPointCoordinate::convertInternalLatLonToString(segment.location.lon, tmp);
output += tmp;
json_geometry_array.values.push_back(output);
}
}
return json_geometry_array;
}
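For reference, encodeNumber above implements the usual polyline wire format: the delta is shifted left by one bit, bitwise-inverted if negative, cut into 5-bit chunks starting from the least significant bits, every chunk except the last is OR-ed with 0x20 as a continuation flag, and 63 is added to land in printable ASCII. A standalone sketch of that transformation (using an unsigned intermediate so the shift is well defined, and leaving out the doubling of backslash characters that the class above adds for JSON output):
#include <iostream>
#include <string>
// Encode one signed value the way encodeNumber above does.
std::string encode_signed(int value)
{
    unsigned n = static_cast<unsigned>(value) << 1;
    if (value < 0)
    {
        n = ~n;
    }
    std::string out;
    while (n >= 0x20)
    {
        out += static_cast<char>((0x20 | (n & 0x1f)) + 63);
        n >>= 5;
    }
    out += static_cast<char>(n + 63);
    return out;
}
int main()
{
    // The textbook example: -179.9832104 scaled by 1e5 -> -17998321 -> "`~oia@"
    std::cout << encode_signed(-17998321) << std::endl;
    return 0;
}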

View File

@ -1,6 +1,6 @@
/*
-Copyright (c) 2017, Project OSRM contributors
+Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
@ -25,29 +25,30 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ENGINE_API_NEAREST_PARAMETERS_HPP
-#define ENGINE_API_NEAREST_PARAMETERS_HPP
+#ifndef POLYLINECOMPRESSOR_H_
+#define POLYLINECOMPRESSOR_H_
-#include "engine/api/base_parameters.hpp"
+#include "../DataStructures/JSONContainer.h"
+#include "../DataStructures/SegmentInformation.h"
-namespace osrm::engine::api
+#include <string>
+#include <vector>
+class PolylineCompressor
{
+private:
+void encodeVectorSignedNumber(std::vector<int> &numbers, std::string &output) const;
-/**
-* Parameters specific to the OSRM Nearest service.
-*
-* Holds member attributes:
-* - number of results: number of nearest segments that should be returned
-*
-* \see OSRM, Coordinate, Hint, Bearing, RouteParame, RouteParameters, TableParameters,
-* NearestParameters, TripParameters, MatchParameters and TileParameters
-*/
-struct NearestParameters : public BaseParameters
-{
-unsigned number_of_results = 1;
+void encodeNumber(int number_to_encode, std::string &output) const;
-bool IsValid() const { return BaseParameters::IsValid() && number_of_results >= 1; }
+public:
+JSON::String printEncodedString(const std::vector<SegmentInformation> &polyline) const;
+JSON::String printEncodedString(const std::vector<FixedPointCoordinate> &polyline) const;
+JSON::Array printUnencodedString(const std::vector<FixedPointCoordinate> &polyline) const;
+JSON::Array printUnencodedString(const std::vector<SegmentInformation> &polyline) const;
};
-} // namespace osrm::engine::api
-#endif // ENGINE_API_NEAREST_PARAMETERS_HPP
+#endif /* POLYLINECOMPRESSOR_H_ */

View File

@ -0,0 +1,459 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef STRONGLYCONNECTEDCOMPONENTS_H_
#define STRONGLYCONNECTEDCOMPONENTS_H_
#include "../typedefs.h"
#include "../DataStructures/DeallocatingVector.h"
#include "../DataStructures/DynamicGraph.h"
#include "../DataStructures/ImportEdge.h"
#include "../DataStructures/QueryNode.h"
#include "../DataStructures/Percent.h"
#include "../DataStructures/Restriction.h"
#include "../DataStructures/TurnInstructions.h"
#include "../Util/SimpleLogger.h"
#include "../Util/StdHashExtensions.h"
#include <osrm/Coordinate.h>
#include <boost/assert.hpp>
#include <boost/filesystem.hpp>
#ifdef __APPLE__
#include <gdal.h>
#include <ogrsf_frmts.h>
#else
#include <gdal/gdal.h>
#include <gdal/ogrsf_frmts.h>
#endif
#include <cstdint>
#include <memory>
#include <stack>
#include <unordered_map>
#include <unordered_set>
#include <vector>
class TarjanSCC
{
private:
struct TarjanNode
{
TarjanNode() : index(UINT_MAX), low_link(UINT_MAX), on_stack(false) {}
unsigned index;
unsigned low_link;
bool on_stack;
};
struct TarjanEdgeData
{
int distance;
unsigned name_id : 31;
bool shortcut : 1;
short type;
bool forward : 1;
bool backward : 1;
bool reversedEdge : 1;
};
struct TarjanStackFrame
{
explicit TarjanStackFrame(NodeID v, NodeID parent) : v(v), parent(parent) {}
NodeID v;
NodeID parent;
};
typedef DynamicGraph<TarjanEdgeData> TarjanDynamicGraph;
typedef TarjanDynamicGraph::InputEdge TarjanEdge;
typedef std::pair<NodeID, NodeID> RestrictionSource;
typedef std::pair<NodeID, bool> restriction_target;
typedef std::vector<restriction_target> EmanatingRestrictionsVector;
typedef std::unordered_map<RestrictionSource, unsigned> RestrictionMap;
std::vector<NodeInfo> m_coordinate_list;
std::vector<EmanatingRestrictionsVector> m_restriction_bucket_list;
std::shared_ptr<TarjanDynamicGraph> m_node_based_graph;
std::unordered_set<NodeID> m_barrier_node_list;
std::unordered_set<NodeID> m_traffic_light_list;
unsigned m_restriction_counter;
RestrictionMap m_restriction_map;
public:
TarjanSCC(int number_of_nodes,
std::vector<NodeBasedEdge> &input_edges,
std::vector<NodeID> &bn,
std::vector<NodeID> &tl,
std::vector<TurnRestriction> &irs,
std::vector<NodeInfo> &nI)
: m_coordinate_list(nI), m_restriction_counter(irs.size())
{
for (const TurnRestriction &restriction : irs)
{
std::pair<NodeID, NodeID> restrictionSource = {restriction.fromNode,
restriction.viaNode};
unsigned index;
RestrictionMap::iterator restriction_iterator =
m_restriction_map.find(restrictionSource);
if (restriction_iterator == m_restriction_map.end())
{
index = m_restriction_bucket_list.size();
m_restriction_bucket_list.resize(index + 1);
m_restriction_map.emplace(restrictionSource, index);
}
else
{
index = restriction_iterator->second;
// Map already contains an is_only_*-restriction
if (m_restriction_bucket_list.at(index).begin()->second)
{
continue;
}
else if (restriction.flags.isOnly)
{
// We are going to insert an is_only_*-restriction. There can be only one.
m_restriction_bucket_list.at(index).clear();
}
}
m_restriction_bucket_list.at(index)
.emplace_back(restriction.toNode, restriction.flags.isOnly);
}
m_barrier_node_list.insert(bn.begin(), bn.end());
m_traffic_light_list.insert(tl.begin(), tl.end());
DeallocatingVector<TarjanEdge> edge_list;
for (const NodeBasedEdge &input_edge : input_edges)
{
if (input_edge.source() == input_edge.target())
{
continue;
}
TarjanEdge edge;
if (input_edge.isForward())
{
edge.source = input_edge.source();
edge.target = input_edge.target();
edge.data.forward = input_edge.isForward();
edge.data.backward = input_edge.isBackward();
}
else
{
edge.source = input_edge.target();
edge.target = input_edge.source();
edge.data.backward = input_edge.isForward();
edge.data.forward = input_edge.isBackward();
}
edge.data.distance = (std::max)((int)input_edge.weight(), 1);
BOOST_ASSERT(edge.data.distance > 0);
edge.data.shortcut = false;
// edge.data.roundabout = input_edge.isRoundabout();
// edge.data.ignoreInGrid = input_edge.ignoreInGrid();
edge.data.name_id = input_edge.name();
edge.data.type = input_edge.type();
// edge.data.isAccessRestricted = input_edge.isAccessRestricted();
edge.data.reversedEdge = false;
edge_list.push_back(edge);
if (edge.data.backward)
{
std::swap(edge.source, edge.target);
edge.data.forward = input_edge.isBackward();
edge.data.backward = input_edge.isForward();
edge.data.reversedEdge = true;
edge_list.push_back(edge);
}
}
std::vector<NodeBasedEdge>().swap(input_edges);
BOOST_ASSERT_MSG(0 == input_edges.size() && 0 == input_edges.capacity(),
"input edge vector not properly deallocated");
std::sort(edge_list.begin(), edge_list.end());
m_node_based_graph = std::make_shared<TarjanDynamicGraph>(number_of_nodes, edge_list);
}
~TarjanSCC() { m_node_based_graph.reset(); }
void Run()
{
// remove files from previous run if exist
DeleteFileIfExists("component.dbf");
DeleteFileIfExists("component.shx");
DeleteFileIfExists("component.shp");
Percent p(m_node_based_graph->GetNumberOfNodes());
OGRRegisterAll();
const char *pszDriverName = "ESRI Shapefile";
OGRSFDriver *poDriver =
OGRSFDriverRegistrar::GetRegistrar()->GetDriverByName(pszDriverName);
if (NULL == poDriver)
{
throw OSRMException("ESRI Shapefile driver not available");
}
OGRDataSource *poDS = poDriver->CreateDataSource("component.shp", NULL);
if (NULL == poDS)
{
throw OSRMException("Creation of output file failed");
}
OGRLayer *poLayer = poDS->CreateLayer("component", NULL, wkbLineString, NULL);
if (NULL == poLayer)
{
throw OSRMException("Layer creation failed.");
}
// The following is a hack to distinguish between stuff that happens
// before the recursive call and stuff that happens after
std::stack<std::pair<bool, TarjanStackFrame>> recursion_stack;
// true = stuff before, false = stuff after call
std::stack<NodeID> tarjan_stack;
std::vector<unsigned> components_index(m_node_based_graph->GetNumberOfNodes(), UINT_MAX);
std::vector<NodeID> component_size_vector;
std::vector<TarjanNode> tarjan_node_list(m_node_based_graph->GetNumberOfNodes());
unsigned component_index = 0, size_of_current_component = 0;
int index = 0;
NodeID last_node = m_node_based_graph->GetNumberOfNodes();
for (NodeID node = 0; node < last_node; ++node)
{
if (UINT_MAX == components_index[node])
{
recursion_stack.emplace(true, TarjanStackFrame(node, node));
}
while (!recursion_stack.empty())
{
const bool before_recursion = recursion_stack.top().first;
TarjanStackFrame currentFrame = recursion_stack.top().second;
NodeID v = currentFrame.v;
recursion_stack.pop();
if (before_recursion)
{
// Mark frame to handle tail of recursion
recursion_stack.emplace(false, currentFrame);
// Mark essential information for SCC
tarjan_node_list[v].index = index;
tarjan_node_list[v].low_link = index;
tarjan_stack.push(v);
tarjan_node_list[v].on_stack = true;
++index;
// Traverse outgoing edges
for (auto e2 : m_node_based_graph->GetAdjacentEdgeRange(v))
{
const TarjanDynamicGraph::NodeIterator vprime =
m_node_based_graph->GetTarget(e2);
if (UINT_MAX == tarjan_node_list[vprime].index)
{
recursion_stack.emplace(true, TarjanStackFrame(vprime, v));
}
else
{
if (tarjan_node_list[vprime].on_stack &&
tarjan_node_list[vprime].index < tarjan_node_list[v].low_link)
{
tarjan_node_list[v].low_link = tarjan_node_list[vprime].index;
}
}
}
}
else
{
tarjan_node_list[currentFrame.parent].low_link =
std::min(tarjan_node_list[currentFrame.parent].low_link,
tarjan_node_list[v].low_link);
// after recursion, let's do cycle checking
// Check if we found a cycle. This is the bottom part of the recursion
if (tarjan_node_list[v].low_link == tarjan_node_list[v].index)
{
NodeID vprime;
do
{
vprime = tarjan_stack.top();
tarjan_stack.pop();
tarjan_node_list[vprime].on_stack = false;
components_index[vprime] = component_index;
++size_of_current_component;
} while (v != vprime);
component_size_vector.emplace_back(size_of_current_component);
if (size_of_current_component > 1000)
{
SimpleLogger().Write() << "large component [" << component_index
<< "]=" << size_of_current_component;
}
++component_index;
size_of_current_component = 0;
}
}
}
}
SimpleLogger().Write() << "identified: " << component_size_vector.size()
<< " many components, marking small components";
// TODO/C++11: prime candidate for lambda function
// unsigned size_one_counter = 0;
// for (unsigned i = 0, end = component_size_vector.size(); i < end; ++i)
// {
// if (1 == component_size_vector[i])
// {
// ++size_one_counter;
// }
// }
unsigned size_one_counter = std::count_if(component_size_vector.begin(),
component_size_vector.end(),
[] (unsigned value) { return 1 == value;});
SimpleLogger().Write() << "identified " << size_one_counter << " SCCs of size 1";
uint64_t total_network_distance = 0;
p.reinit(m_node_based_graph->GetNumberOfNodes());
NodeID last_u_node = m_node_based_graph->GetNumberOfNodes();
for (NodeID u = 0; u < last_u_node; ++u)
{
p.printIncrement();
for (auto e1 : m_node_based_graph->GetAdjacentEdgeRange(u))
{
if (!m_node_based_graph->GetEdgeData(e1).reversedEdge)
{
continue;
}
const TarjanDynamicGraph::NodeIterator v = m_node_based_graph->GetTarget(e1);
total_network_distance +=
100 * FixedPointCoordinate::ApproximateDistance(m_coordinate_list[u].lat,
m_coordinate_list[u].lon,
m_coordinate_list[v].lat,
m_coordinate_list[v].lon);
if (SHRT_MAX != m_node_based_graph->GetEdgeData(e1).type)
{
BOOST_ASSERT(e1 != UINT_MAX);
BOOST_ASSERT(u != UINT_MAX);
BOOST_ASSERT(v != UINT_MAX);
const unsigned size_of_containing_component =
std::min(component_size_vector[components_index[u]],
component_size_vector[components_index[v]]);
// edges that end on bollard nodes may actually be in two distinct components
if (size_of_containing_component < 10)
{
OGRLineString lineString;
lineString.addPoint(m_coordinate_list[u].lon / COORDINATE_PRECISION,
m_coordinate_list[u].lat / COORDINATE_PRECISION);
lineString.addPoint(m_coordinate_list[v].lon / COORDINATE_PRECISION,
m_coordinate_list[v].lat / COORDINATE_PRECISION);
OGRFeature *poFeature = OGRFeature::CreateFeature(poLayer->GetLayerDefn());
poFeature->SetGeometry(&lineString);
if (OGRERR_NONE != poLayer->CreateFeature(poFeature))
{
throw OSRMException("Failed to create feature in shapefile.");
}
OGRFeature::DestroyFeature(poFeature);
}
}
}
}
OGRDataSource::DestroyDataSource(poDS);
std::vector<NodeID>().swap(component_size_vector);
BOOST_ASSERT_MSG(0 == component_size_vector.size() && 0 == component_size_vector.capacity(),
"component_size_vector not properly deallocated");
std::vector<NodeID>().swap(components_index);
BOOST_ASSERT_MSG(0 == components_index.size() && 0 == components_index.capacity(),
"components_index not properly deallocated");
SimpleLogger().Write() << "total network distance: " << (uint64_t)total_network_distance /
100 / 1000. << " km";
}
private:
unsigned CheckForEmanatingIsOnlyTurn(const NodeID u, const NodeID v) const
{
std::pair<NodeID, NodeID> restriction_source = {u, v};
RestrictionMap::const_iterator restriction_iterator =
m_restriction_map.find(restriction_source);
if (restriction_iterator != m_restriction_map.end())
{
const unsigned index = restriction_iterator->second;
for (const RestrictionSource &restriction_target : m_restriction_bucket_list.at(index))
{
if (restriction_target.second)
{
return restriction_target.first;
}
}
}
return UINT_MAX;
}
bool CheckIfTurnIsRestricted(const NodeID u, const NodeID v, const NodeID w) const
{
// only add an edge if the turn is not a U-turn, except at the end of a dead-end street.
std::pair<NodeID, NodeID> restriction_source = {u, v};
RestrictionMap::const_iterator restriction_iterator =
m_restriction_map.find(restriction_source);
if (restriction_iterator != m_restriction_map.end())
{
const unsigned index = restriction_iterator->second;
for (const restriction_target &restriction_target : m_restriction_bucket_list.at(index))
{
if (w == restriction_target.first)
{
return true;
}
}
}
return false;
}
void DeleteFileIfExists(const std::string &file_name) const
{
if (boost::filesystem::exists(file_name))
{
boost::filesystem::remove(file_name);
}
}
};
#endif /* STRONGLYCONNECTEDCOMPONENTS_H_ */
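Run() above mixes the component search with restriction bookkeeping and shapefile export; its core is Tarjan's strongly connected components algorithm driven by an explicit stack instead of recursion. A compact, self-contained sketch of the same pattern on a plain adjacency list, with illustrative names and an extra already-visited check when a frame is popped:
#include <algorithm>
#include <iostream>
#include <stack>
#include <utility>
#include <vector>
// Assign a component id to every node of a directed graph using Tarjan's SCC
// algorithm with an explicit stack, mirroring the recursion_stack/tarjan_stack
// structure of TarjanSCC::Run() above.
std::vector<int> tarjan_components(const std::vector<std::vector<int>> &graph)
{
    const int n = static_cast<int>(graph.size());
    const int unset = -1;
    std::vector<int> index(n, unset), low_link(n, unset), component(n, unset);
    std::vector<bool> on_stack(n, false);
    std::stack<int> scc_stack;
    typedef std::pair<int, int> Frame; // (node, parent)
    // first == true marks the "before recursion" phase, as in the code above
    std::stack<std::pair<bool, Frame>> recursion;
    int next_index = 0;
    int next_component = 0;
    for (int root = 0; root < n; ++root)
    {
        if (index[root] != unset)
        {
            continue;
        }
        recursion.emplace(true, Frame(root, root));
        while (!recursion.empty())
        {
            const bool before = recursion.top().first;
            const Frame frame = recursion.top().second;
            recursion.pop();
            const int v = frame.first;
            const int parent = frame.second;
            if (before)
            {
                if (index[v] != unset)
                {
                    continue; // already reached through another edge
                }
                recursion.emplace(false, frame); // revisit after the children
                index[v] = low_link[v] = next_index++;
                scc_stack.push(v);
                on_stack[v] = true;
                for (const int w : graph[v])
                {
                    if (index[w] == unset)
                    {
                        recursion.emplace(true, Frame(w, v));
                    }
                    else if (on_stack[w])
                    {
                        low_link[v] = std::min(low_link[v], index[w]);
                    }
                }
            }
            else
            {
                low_link[parent] = std::min(low_link[parent], low_link[v]);
                if (low_link[v] == index[v]) // v is the root of a component
                {
                    int w;
                    do
                    {
                        w = scc_stack.top();
                        scc_stack.pop();
                        on_stack[w] = false;
                        component[w] = next_component;
                    } while (w != v);
                    ++next_component;
                }
            }
        }
    }
    return component;
}
int main()
{
    // 0 -> 1 -> 2 -> 0 is one strongly connected component; 3 is on its own.
    const std::vector<std::vector<int>> graph = {{1}, {2}, {0, 3}, {}};
    const std::vector<int> component = tarjan_components(graph);
    for (int i = 0; i < static_cast<int>(component.size()); ++i)
    {
        std::cout << "node " << i << " -> component " << component[i] << '\n';
    }
    return 0;
}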

File diff suppressed because it is too large

View File

@ -1,755 +1,282 @@
cmake_minimum_required(VERSION 3.18)
set(CMAKE_CXX_STANDARD 20)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
if(CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_CURRENT_BINARY_DIR AND NOT MSVC_IDE)
message(FATAL_ERROR "In-source builds are not allowed.
Please create a directory and run cmake from there, passing the path to this source directory as the last argument.
This process created the file `CMakeCache.txt' and the directory `CMakeFiles'. Please delete them.")
endif()
# detect if this is included as subproject and if so expose
# some variables to its parent scope
get_directory_property(BUILD_AS_SUBPROJECT PARENT_DIRECTORY)
if(BUILD_AS_SUBPROJECT)
message(STATUS "Building libosrm as subproject.")
endif()
# set OSRM_BUILD_DIR location (might be used in various scripts)
if (NOT WIN32 AND NOT DEFINED ENV{OSRM_BUILD_DIR})
set(ENV{OSRM_BUILD_DIR} ${CMAKE_CURRENT_BINARY_DIR})
endif()
option(ENABLE_CONAN "Use conan for dependencies" OFF)
option(ENABLE_CCACHE "Speed up incremental rebuilds via ccache" ON)
option(BUILD_TOOLS "Build OSRM tools" OFF)
option(BUILD_PACKAGE "Build OSRM package" OFF)
option(BUILD_ROUTED "Build osrm-routed HTTP server" ON)
option(ENABLE_ASSERTIONS "Use assertions in release mode" OFF)
option(ENABLE_DEBUG_LOGGING "Use debug logging in release mode" OFF)
option(ENABLE_COVERAGE "Build with coverage instrumentalisation" OFF)
option(ENABLE_SANITIZER "Use memory sanitizer for Debug build" OFF)
option(ENABLE_LTO "Use Link Time Optimisation" ON)
option(ENABLE_FUZZING "Fuzz testing using LLVM's libFuzzer" OFF)
option(ENABLE_NODE_BINDINGS "Build NodeJs bindings" OFF)
option(ENABLE_CLANG_TIDY "Enables clang-tidy checks" OFF)
if (ENABLE_CLANG_TIDY)
find_program(CLANG_TIDY_COMMAND NAMES clang-tidy)
if(NOT CLANG_TIDY_COMMAND)
message(FATAL_ERROR "ENABLE_CLANG_TIDY is ON but clang-tidy is not found!")
else()
message(STATUS "Found clang-tidy at ${CLANG_TIDY_COMMAND}")
set(CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_COMMAND};--warnings-as-errors=*;--header-filter=.*")
endif()
endif()
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
# be compatible with version handling before cmake 3.x
if (POLICY CMP0057)
cmake_policy(SET CMP0057 NEW)
endif()
if (POLICY CMP0074)
cmake_policy(SET CMP0074 NEW)
endif()
if (POLICY CMP0167)
cmake_policy(SET CMP0167 NEW)
endif()
project(OSRM C CXX)
if(ENABLE_LTO AND (CMAKE_BUILD_TYPE MATCHES Release OR CMAKE_BUILD_TYPE MATCHES MinRelSize OR CMAKE_BUILD_TYPE MATCHES RelWithDebInfo))
include(CheckIPOSupported)
check_ipo_supported(RESULT LTO_SUPPORTED OUTPUT error)
if(LTO_SUPPORTED)
message(STATUS "IPO / LTO enabled")
set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE)
else()
message(FATAL_ERROR "IPO / LTO not supported: <${error}>")
endif()
endif()
# add @loader_path/$ORIGIN to rpath to make binaries relocatable
if (APPLE)
set(CMAKE_BUILD_RPATH "@loader_path")
else()
set(CMAKE_BUILD_RPATH "\$ORIGIN")
# https://stackoverflow.com/questions/6324131/rpath-origin-not-having-desired-effect
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,-z,origin")
endif()
include(JSONParser)
file(READ "package.json" packagejsonraw)
sbeParseJson(packagejson packagejsonraw)
# This regex is not strict enough, but the correct one is too complicated for cmake matching.
# https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string
if (packagejson.version MATCHES "^([0-9]+)\.([0-9]+)\.([0-9]+)([-+][0-9a-zA-Z.-]+)?$")
set(OSRM_VERSION_MAJOR ${CMAKE_MATCH_1})
set(OSRM_VERSION_MINOR ${CMAKE_MATCH_2})
set(OSRM_VERSION_PATCH ${CMAKE_MATCH_3})
set(OSRM_VERSION_PRERELEASE_BUILD ${CMAKE_MATCH_4})
set(OSRM_VERSION packagejson.version)
else()
message(FATAL_ERROR "Version from package.json cannot be parsed, expected semver compatible label, but found ${packagejson.version}")
endif()
if (MSVC)
add_definitions("-DOSRM_PROJECT_DIR=\"${CMAKE_CURRENT_SOURCE_DIR}\"")
else()
add_definitions(-DOSRM_PROJECT_DIR="${CMAKE_CURRENT_SOURCE_DIR}")
endif()
# these two functions build up custom variables:
# DEPENDENCIES_INCLUDE_DIRS and OSRM_DEFINES
# These variables we want to pass to
# include_directories and add_definitions for both
# this build and for sharing externally via pkg-config
function(add_dependency_includes)
if(${ARGC} GREATER 0)
list(APPEND DEPENDENCIES_INCLUDE_DIRS "${ARGV}")
set(DEPENDENCIES_INCLUDE_DIRS "${DEPENDENCIES_INCLUDE_DIRS}" PARENT_SCOPE)
endif()
endfunction(add_dependency_includes)
function(add_dependency_defines defines)
list(APPEND OSRM_DEFINES "${defines}")
set(OSRM_DEFINES "${OSRM_DEFINES}" PARENT_SCOPE)
endfunction(add_dependency_defines)
cmake_minimum_required(VERSION 2.8)
project(OSRM)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
include(CheckCXXCompilerFlag)
include(FindPackageHandleStandardArgs)
include(GNUInstallDirs)
list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake")
include(GetGitRevisionDescription)
git_describe(GIT_DESCRIPTION)
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_CURRENT_SOURCE_DIR}/cmake)
set(bitness 32)
if(CMAKE_SIZEOF_VOID_P EQUAL 8)
set(bitness 64)
message(STATUS "Building on a 64 bit system")
else()
message(FATAL_ERROR "Building on a 32 bit system is not supported")
message(WARNING "Building on a 32 bit system is unsupported")
endif()
include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/include/)
include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/include/)
include_directories(SYSTEM ${CMAKE_CURRENT_SOURCE_DIR}/generated/include/)
include_directories(SYSTEM ${CMAKE_CURRENT_SOURCE_DIR}/third_party/sol2/include)
include_directories(${CMAKE_SOURCE_DIR}/Include/)
set(BOOST_COMPONENTS date_time iostreams program_options thread unit_test_framework)
add_custom_command(OUTPUT ${CMAKE_SOURCE_DIR}/Util/UUID.cpp UUID.cpp.alwaysbuild
COMMAND ${CMAKE_COMMAND} -DSOURCE_DIR=${CMAKE_SOURCE_DIR}
-P ${CMAKE_CURRENT_SOURCE_DIR}/cmake/UUID-Config.cmake
DEPENDS
${CMAKE_SOURCE_DIR}/Util/UUID.cpp.in
${CMAKE_SOURCE_DIR}/cmake/UUID-Config.cmake
COMMENT "Configuring UUID.cpp"
VERBATIM)
add_custom_target(UUIDConfigure DEPENDS ${CMAKE_SOURCE_DIR}/Util/UUID.cpp)
set(BOOST_COMPONENTS date_time filesystem iostreams program_options regex system thread)
configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/include/util/version.hpp.in
${CMAKE_CURRENT_BINARY_DIR}/include/util/version.hpp
${CMAKE_SOURCE_DIR}/Util/GitDescription.cpp.in
${CMAKE_SOURCE_DIR}/Util/GitDescription.cpp
)
file(GLOB UtilGlob src/util/*.cpp src/util/*/*.cpp)
file(GLOB ExtractorGlob src/extractor/*.cpp src/extractor/*/*.cpp)
file(GLOB GuidanceGlob src/guidance/*.cpp src/extractor/intersection/*.cpp)
file(GLOB PartitionerGlob src/partitioner/*.cpp)
file(GLOB CustomizerGlob src/customize/*.cpp)
file(GLOB ContractorGlob src/contractor/*.cpp)
file(GLOB UpdaterGlob src/updater/*.cpp)
file(GLOB StorageGlob src/storage/*.cpp)
file(GLOB ServerGlob src/server/*.cpp src/server/**/*.cpp)
file(GLOB EngineGlob src/engine/*.cpp src/engine/**/*.cpp)
file(GLOB ErrorcodesGlob src/osrm/errorcodes.cpp)
file(GLOB ExtractorGlob Extractor/*.cpp)
set(ExtractorSources extractor.cpp ${ExtractorGlob})
add_executable(osrm-extract ${ExtractorSources})
add_library(UTIL OBJECT ${UtilGlob})
add_library(EXTRACTOR OBJECT ${ExtractorGlob})
add_library(GUIDANCE OBJECT ${GuidanceGlob})
add_library(PARTITIONER OBJECT ${PartitionerGlob})
add_library(CUSTOMIZER OBJECT ${CustomizerGlob})
add_library(CONTRACTOR OBJECT ${ContractorGlob})
add_library(UPDATER OBJECT ${UpdaterGlob})
add_library(STORAGE OBJECT ${StorageGlob})
add_library(ENGINE OBJECT ${EngineGlob})
file(GLOB PrepareGlob Contractor/*.cpp DataStructures/HilbertValue.cpp DataStructures/RestrictionMap.cpp)
set(PrepareSources prepare.cpp ${PrepareGlob})
add_executable(osrm-prepare ${PrepareSources})
if (BUILD_ROUTED)
add_library(SERVER OBJECT ${ServerGlob})
add_executable(osrm-routed src/tools/routed.cpp $<TARGET_OBJECTS:SERVER> $<TARGET_OBJECTS:UTIL>)
endif()
file(GLOB ServerGlob Server/*.cpp)
file(GLOB DescriptorGlob Descriptors/*.cpp)
file(GLOB DatastructureGlob DataStructures/SearchEngineData.cpp DataStructures/RouteParameters.cpp)
file(GLOB CoordinateGlob DataStructures/Coordinate.cpp)
file(GLOB AlgorithmGlob Algorithms/*.cpp)
file(GLOB HttpGlob Server/Http/*.cpp)
file(GLOB LibOSRMGlob Library/*.cpp)
set_target_properties(UTIL PROPERTIES LINKER_LANGUAGE CXX)
set(
OSRMSources
${LibOSRMGlob}
${DescriptorGlob}
${DatastructureGlob}
${CoordinateGlob}
${AlgorithmGlob}
${HttpGlob}
)
add_library(COORDLIB STATIC ${CoordinateGlob})
add_library(OSRM ${OSRMSources} Util/GitDescription.cpp Util/UUID.cpp)
add_library(UUID STATIC Util/UUID.cpp)
add_library(GITDESCRIPTION STATIC Util/GitDescription.cpp)
add_dependencies(UUID UUIDConfigure)
add_dependencies(GITDESCRIPTION GIT_DESCRIPTION)
add_executable(osrm-extract src/tools/extract.cpp)
add_executable(osrm-partition src/tools/partition.cpp)
add_executable(osrm-customize src/tools/customize.cpp)
add_executable(osrm-contract src/tools/contract.cpp)
add_executable(osrm-datastore src/tools/store.cpp $<TARGET_OBJECTS:MICROTAR> $<TARGET_OBJECTS:UTIL>)
add_library(osrm src/osrm/osrm.cpp $<TARGET_OBJECTS:ENGINE> $<TARGET_OBJECTS:STORAGE> $<TARGET_OBJECTS:MICROTAR> $<TARGET_OBJECTS:UTIL>)
add_library(osrm_contract src/osrm/contractor.cpp $<TARGET_OBJECTS:CONTRACTOR> $<TARGET_OBJECTS:UTIL>)
add_library(osrm_extract src/osrm/extractor.cpp $<TARGET_OBJECTS:EXTRACTOR> $<TARGET_OBJECTS:MICROTAR> $<TARGET_OBJECTS:UTIL>)
add_library(osrm_guidance $<TARGET_OBJECTS:GUIDANCE> $<TARGET_OBJECTS:UTIL>)
add_library(osrm_partition src/osrm/partitioner.cpp $<TARGET_OBJECTS:PARTITIONER> $<TARGET_OBJECTS:MICROTAR> $<TARGET_OBJECTS:UTIL>)
add_library(osrm_customize src/osrm/customizer.cpp $<TARGET_OBJECTS:CUSTOMIZER> $<TARGET_OBJECTS:MICROTAR> $<TARGET_OBJECTS:UTIL>)
add_library(osrm_update $<TARGET_OBJECTS:UPDATER> $<TARGET_OBJECTS:MICROTAR> $<TARGET_OBJECTS:UTIL>)
add_library(osrm_store $<TARGET_OBJECTS:STORAGE> $<TARGET_OBJECTS:MICROTAR> $<TARGET_OBJECTS:UTIL>)
add_executable(osrm-routed routed.cpp ${ServerGlob})
set_target_properties(osrm-routed PROPERTIES COMPILE_FLAGS -DROUTED)
add_executable(osrm-datastore datastore.cpp)
# Explicitly set the build type to Release if no other type is specified
# on the command line. Without this, cmake defaults to an unoptimized,
# non-debug build, which almost nobody wants.
if(NOT CMAKE_BUILD_TYPE)
message(STATUS "No build type specified, defaulting to Release")
# Check the release mode
if(NOT CMAKE_BUILD_TYPE MATCHES Debug)
set(CMAKE_BUILD_TYPE Release)
endif()
if(CMAKE_BUILD_TYPE MATCHES Debug)
message(STATUS "Configuring OSRM in debug mode")
elseif(CMAKE_BUILD_TYPE MATCHES Release)
message(STATUS "Configuring OSRM in release mode")
elseif(CMAKE_BUILD_TYPE MATCHES RelWithDebInfo)
message(STATUS "Configuring OSRM in release mode with debug flags")
elseif(CMAKE_BUILD_TYPE MATCHES MinRelSize)
message(STATUS "Configuring OSRM in release mode with minimized size")
else()
message(STATUS "Unrecognized build type - will use cmake defaults")
endif()
# Additional logic for the different build types
if(CMAKE_BUILD_TYPE MATCHES Debug OR CMAKE_BUILD_TYPE MATCHES RelWithDebInfo)
message(STATUS "Configuring debug mode flags")
set(ENABLE_ASSERTIONS ON)
set(ENABLE_DEBUG_LOGGING ON)
endif()
if(NOT CMAKE_CXX_COMPILER_ID MATCHES "MSVC")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -fno-inline -fno-omit-frame-pointer")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fno-inline -fno-omit-frame-pointer")
endif()
if(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
set(CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -ggdb")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Og -ggdb")
endif()
set(MAYBE_COVERAGE_LIBRARIES "")
if (ENABLE_COVERAGE)
if (NOT CMAKE_BUILD_TYPE MATCHES "Debug")
message(ERROR "ENABLE_COVERAGE=ON only makes sense with a Debug build")
if(NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
message(STATUS "adding profiling flags")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage -fno-inline")
set(CMAKE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-arcs -ftest-coverage -fno-inline")
endif()
message(STATUS "Enabling coverage")
set(MAYBE_COVERAGE_LIBRARIES "-lgcov")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -ftest-coverage -fprofile-arcs")
endif()
if(CMAKE_BUILD_TYPE MATCHES Release)
message(STATUS "Configuring OSRM in release mode")
# Check if LTO is available
set(LTO_FLAGS "")
CHECK_CXX_COMPILER_FLAG("-flto" HAS_LTO_FLAG)
if (HAS_LTO_FLAG)
set(LTO_FLAGS "${LTO_FLAGS} -flto")
if (ENABLE_SANITIZER)
set(SANITIZER_FLAGS "-g -fsanitize=address -fsanitize-address-use-after-scope -fsanitize=undefined -fno-omit-frame-pointer")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SANITIZER_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SANITIZER_FLAGS}")
set(OSRM_CXXFLAGS "${OSRM_CXXFLAGS} ${SANITIZER_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${SANITIZER_FLAGS}")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${SANITIZER_FLAGS}")
# Since gcc 4.9 the LTO format is non-standard ('slim'), so we need to use the built-in tools
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" AND
NOT "${CMAKE_CXX_COMPILER_VERSION}" VERSION_LESS "4.9.0")
message(STATUS "Using gcc specific binutils for LTO.")
set(CMAKE_AR "/usr/bin/gcc-ar")
set(CMAKE_RANLIB "/usr/bin/gcc-ranlib")
endif()
endif (HAS_LTO_FLAG)
endif()
# Configuring compilers
include(cmake/warnings.cmake)
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC -fcolor-diagnostics -ftemplate-depth=1024")
elseif(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
set(COLOR_FLAG "-fdiagnostics-color=auto")
check_cxx_compiler_flag("-fdiagnostics-color=auto" HAS_COLOR_FLAG)
if(NOT HAS_COLOR_FLAG)
set(COLOR_FLAG "")
endif()
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
# using Clang
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wunreachable-code -Wno-unknown-pragmas -Wno-unneeded-internal-declaration -pedantic -fPIC")
message(STATUS "OpenMP parallelization not available using clang++")
elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
# using GCC
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 ${COLOR_FLAG} -fPIC -ftemplate-depth=1024")
if(WIN32) # using mingw
add_dependency_defines(-DWIN32)
set(OPTIONAL_SOCKET_LIBS ws2_32 wsock32)
endif()
elseif(CMAKE_CXX_COMPILER_ID MATCHES "Intel")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -fopenmp -pedantic -fPIC")
elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
# using Intel C++
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -static-intel -wd10237 -Wall -ipo -fPIC")
elseif(CMAKE_CXX_COMPILER_ID MATCHES "MSVC")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -static-intel -wd10237 -Wall -openmp -ipo -fPIC")
elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
# using Visual Studio C++
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /bigobj") # avoid compiler error C1128 from scripting_environment_lua.cpp
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /DWIN32_LEAN_AND_MEAN") # avoid compiler error C2011 from dual #include of winsock.h and winsock2.h
add_dependency_defines(-DBOOST_LIB_DIAGNOSTIC)
add_dependency_defines(-D_CRT_SECURE_NO_WARNINGS)
add_dependency_defines(-DNOMINMAX) # avoid min and max macros that can break compilation
add_dependency_defines(-D_WIN32_WINNT=0x0501)
add_dependency_defines(-DXML_STATIC)
find_library(ws2_32_LIBRARY_PATH ws2_32)
target_link_libraries(osrm-extract wsock32 ws2_32)
endif()
# disable partitioning of LTO process when possible (fixes Debian issues)
set(LTO_PARTITION_FLAGS "")
CHECK_CXX_COMPILER_FLAG("-flto-partition=none" HAS_LTO_PARTITION_FLAG)
if (HAS_LTO_PARTITION_FLAG)
set(LTO_PARTITION_FLAGS "${LTO_PARTITION_FLAGS} -flto-partition=none")
endif (HAS_LTO_PARTITION_FLAG)
# Add Link-Time-Optimization flags, if supported (GCC >= 4.7) and enabled
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${LTO_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${LTO_FLAGS} ${LTO_PARTITION_FLAGS}")
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${LTO_FLAGS} ${LTO_PARTITION_FLAGS}")
# Activate C++11
ADD_DEFINITIONS(
-std=c++11 # Or -std=c++0x
# Other flags
)
# Configuring other platform dependencies
if(APPLE)
set(CMAKE_OSX_ARCHITECTURES "x86_64")
message(STATUS "Set Architecture to x64 on OS X")
exec_program(uname ARGS -v OUTPUT_VARIABLE DARWIN_VERSION)
string(REGEX MATCH "[0-9]+" DARWIN_VERSION ${DARWIN_VERSION})
# if(DARWIN_VERSION GREATER 12 AND NOT OSXLIBSTD)
# message(STATUS "Activating -std=c++11 flag for >= OS X 10.9")
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
# endif()
if(OSXLIBSTD)
message(STATUS "linking against ${OSXLIBSTD}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=${OSXLIBSTD}")
elseif(DARWIN_VERSION GREATER 12)
message(STATUS "linking against libc++")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
endif()
endif()
if(UNIX AND NOT APPLE)
find_library(RT_LIB rt)
if (RT_LIB)
set(MAYBE_RT_LIBRARY -lrt)
endif()
target_link_libraries(osrm-datastore rt)
target_link_libraries(OSRM rt)
endif()
#Check Boost
set(BOOST_MIN_VERSION "1.49.0")
find_package(Boost ${BOOST_MIN_VERSION} COMPONENTS ${BOOST_COMPONENTS} REQUIRED)
if(NOT Boost_FOUND)
message(FATAL_ERROR "Fatal error: Boost (version >= 1.49.0) required.\n")
endif()
include_directories(${Boost_INCLUDE_DIRS})
target_link_libraries(OSRM ${Boost_LIBRARIES} COORDLIB)
target_link_libraries(osrm-extract ${Boost_LIBRARIES} UUID GITDESCRIPTION COORDLIB)
target_link_libraries(osrm-prepare ${Boost_LIBRARIES} UUID GITDESCRIPTION COORDLIB)
target_link_libraries(osrm-routed ${Boost_LIBRARIES} OSRM UUID GITDESCRIPTION)
target_link_libraries(osrm-datastore ${Boost_LIBRARIES} UUID GITDESCRIPTION COORDLIB)
find_package(Threads REQUIRED)
target_link_libraries(osrm-extract ${CMAKE_THREAD_LIBS_INIT})
# Third-party libraries
set(RAPIDJSON_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/third_party/rapidjson/include")
include_directories(SYSTEM ${RAPIDJSON_INCLUDE_DIR})
set(MICROTAR_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/third_party/microtar/src")
include_directories(SYSTEM ${MICROTAR_INCLUDE_DIR})
add_library(MICROTAR OBJECT "${CMAKE_CURRENT_SOURCE_DIR}/third_party/microtar/src/microtar.c")
set_property(TARGET MICROTAR PROPERTY POSITION_INDEPENDENT_CODE ON)
target_no_warning(MICROTAR unused-variable)
target_no_warning(MICROTAR format)
set(PROTOZERO_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/third_party/protozero/include")
include_directories(SYSTEM ${PROTOZERO_INCLUDE_DIR})
set(VTZERO_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/third_party/vtzero/include")
include_directories(SYSTEM ${VTZERO_INCLUDE_DIR})
set(FLATBUFFERS_BUILD_TESTS OFF CACHE BOOL "Disable the build of Flatbuffers tests and samples.")
set(FLATBUFFERS_SRC_DIR "${CMAKE_CURRENT_SOURCE_DIR}/third_party/flatbuffers")
set(FLATBUFFERS_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/third_party/flatbuffers/include")
include_directories(SYSTEM ${FLATBUFFERS_INCLUDE_DIR})
add_subdirectory(${FLATBUFFERS_SRC_DIR}
${CMAKE_CURRENT_BINARY_DIR}/flatbuffers-build
EXCLUDE_FROM_ALL)
set(FMT_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/third_party/fmt/include")
add_compile_definitions(FMT_HEADER_ONLY)
include_directories(SYSTEM ${FMT_INCLUDE_DIR})
# see https://stackoverflow.com/questions/70898030/boost-link-error-using-conan-find-package
if (MSVC)
add_definitions(-DBOOST_ALL_NO_LIB)
endif()
if(ENABLE_CONAN)
message(STATUS "Installing dependencies via Conan")
# Conan will generate Find*.cmake files to build directory, so we use them with the highest priority
list(INSERT CMAKE_MODULE_PATH 0 ${CMAKE_BINARY_DIR})
list(INSERT CMAKE_PREFIX_PATH 0 ${CMAKE_BINARY_DIR})
include(${CMAKE_CURRENT_LIST_DIR}/cmake/conan.cmake)
conan_check(REQUIRED)
set(CONAN_BOOST_VERSION "1.85.0@#14265ec82b25d91305bbb3b30d3357f8")
set(CONAN_BZIP2_VERSION "1.0.8@#d1b2d5816f25865acf978501dff1f897")
set(CONAN_EXPAT_VERSION "2.6.2@#2d385d0d50eb5561006a7ff9e356656b")
set(CONAN_LUA_VERSION "5.4.6@#658d6089093cf01992c2737ab2e96763")
set(CONAN_TBB_VERSION "2021.12.0@#e56e5b44be8d690530585dd3634c0106")
set(CONAN_SYSTEM_INCLUDES ON)
set(CONAN_ARGS
REQUIRES
"boost/${CONAN_BOOST_VERSION}"
"bzip2/${CONAN_BZIP2_VERSION}"
"expat/${CONAN_EXPAT_VERSION}"
"lua/${CONAN_LUA_VERSION}"
"onetbb/${CONAN_TBB_VERSION}"
BASIC_SETUP
GENERATORS cmake_find_package json # json generator generates a conanbuildinfo.json in the build folder so (non-CMake) projects can easily parse OSRM's dependencies
KEEP_RPATHS
NO_OUTPUT_DIRS
OPTIONS boost:without_stacktrace=True # Apple Silicon cross-compilation fails without it
BUILD missing
)
# Enable revisions in the conan config
execute_process(COMMAND ${CONAN_CMD} config set general.revisions_enabled=1 RESULT_VARIABLE RET_CODE)
if(NOT "${RET_CODE}" STREQUAL "0")
message(FATAL_ERROR "Error setting revisions for Conan: '${RET_CODE}'")
find_package(Lua52)
if(NOT LUA52_FOUND)
find_package(Lua51 REQUIRED)
if(NOT APPLE)
find_package(LuaJIT 5.1)
endif()
# explicitly say Conan to use x86 dependencies if build for x86 platforms (https://github.com/conan-io/cmake-conan/issues/141)
if(NOT CMAKE_SIZEOF_VOID_P EQUAL 8)
conan_cmake_run("${CONAN_ARGS};ARCH;x86")
# cross-compilation for Apple Silicon
elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64" AND CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL "x86_64")
conan_cmake_run("${CONAN_ARGS};ARCH;armv8")
else()
conan_cmake_run("${CONAN_ARGS}")
endif()
add_dependency_includes(${CONAN_INCLUDE_DIRS_BOOST})
add_dependency_includes(${CONAN_INCLUDE_DIRS_BZIP2})
add_dependency_includes(${CONAN_INCLUDE_DIRS_EXPAT})
add_dependency_includes(${CONAN_INCLUDE_DIRS_LUA})
add_dependency_includes(${CONAN_INCLUDE_DIRS_TBB})
set(Boost_USE_STATIC_LIBS ON)
find_package(Boost REQUIRED COMPONENTS ${BOOST_COMPONENTS})
set(Boost_DATE_TIME_LIBRARY "${Boost_date_time_LIB_TARGETS}")
set(Boost_PROGRAM_OPTIONS_LIBRARY "${Boost_program_options_LIB_TARGETS}")
set(Boost_IOSTREAMS_LIBRARY "${Boost_iostreams_LIB_TARGETS}")
set(Boost_THREAD_LIBRARY "${Boost_thread_LIB_TARGETS}")
set(Boost_ZLIB_LIBRARY "${Boost_zlib_LIB_TARGETS}")
set(Boost_UNIT_TEST_FRAMEWORK_LIBRARY "${Boost_unit_test_framework_LIB_TARGETS}")
find_package(BZip2 REQUIRED)
find_package(EXPAT REQUIRED)
find_package(lua REQUIRED)
set(LUA_LIBRARIES ${lua_LIBRARIES})
find_package(TBB REQUIRED)
# note: we avoid calling find_package(Osmium ...) here to ensure that the
# expat and bzip2 are used from conan rather than the system
include_directories(SYSTEM ${CMAKE_CURRENT_SOURCE_DIR}/third_party/libosmium/include)
else()
find_package(Boost 1.70 REQUIRED COMPONENTS ${BOOST_COMPONENTS})
add_dependency_includes(${Boost_INCLUDE_DIRS})
find_package(TBB REQUIRED)
add_dependency_includes(${TBB_INCLUDE_DIR})
set(TBB_LIBRARIES TBB::tbb)
find_package(EXPAT REQUIRED)
add_dependency_includes(${EXPAT_INCLUDE_DIRS})
find_package(BZip2 REQUIRED)
add_dependency_includes(${BZIP2_INCLUDE_DIR})
find_package(Lua 5.2 REQUIRED)
if (LUA_FOUND)
message(STATUS "Using Lua ${LUA_VERSION_STRING}")
endif()
add_dependency_includes(${LUA_INCLUDE_DIR})
# add a target to generate API documentation with Doxygen
find_package(Doxygen)
if(DOXYGEN_FOUND)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile @ONLY)
add_custom_target(doc
${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generating API documentation with Doxygen" VERBATIM
)
endif()
# note libosmium depends on expat and bzip2
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/third_party/libosmium/cmake")
if(NOT OSMIUM_INCLUDE_DIR)
set(OSMIUM_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/third_party/libosmium/include")
endif()
find_package(Osmium REQUIRED COMPONENTS io)
include_directories(SYSTEM ${OSMIUM_INCLUDE_DIR})
endif()
# prefix compilation with ccache by default if available and on clang or gcc
if(ENABLE_CCACHE AND (CMAKE_CXX_COMPILER_ID MATCHES "Clang" OR CMAKE_CXX_COMPILER_ID MATCHES "GNU"))
find_program(CCACHE_FOUND ccache)
if(CCACHE_FOUND)
message(STATUS "Using ccache to speed up incremental builds")
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache)
set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache)
if(NOT APPLE)
find_package(LuaJIT 5.2)
endif()
endif()
# even with conan builds we want to link to system zlib
# to ensure that osrm binaries play well with other binaries like nodejs
if( LUAJIT_FOUND )
target_link_libraries(osrm-extract ${LUAJIT_LIBRARIES})
target_link_libraries(osrm-prepare ${LUAJIT_LIBRARIES})
else()
target_link_libraries(osrm-extract ${LUA_LIBRARY})
target_link_libraries(osrm-prepare ${LUA_LIBRARY})
endif()
include_directories(${LUA_INCLUDE_DIR})
find_package(LibXml2 REQUIRED)
include_directories(${LIBXML2_INCLUDE_DIR})
target_link_libraries(osrm-extract ${LIBXML2_LIBRARIES})
find_package( Luabind REQUIRED )
include_directories(${LUABIND_INCLUDE_DIR})
target_link_libraries(osrm-extract ${LUABIND_LIBRARY})
target_link_libraries(osrm-prepare ${LUABIND_LIBRARY})
find_package( STXXL REQUIRED )
include_directories(${STXXL_INCLUDE_DIR})
target_link_libraries(OSRM ${STXXL_LIBRARY})
target_link_libraries(osrm-extract ${STXXL_LIBRARY})
target_link_libraries(osrm-prepare ${STXXL_LIBRARY})
find_package( OSMPBF REQUIRED )
include_directories(${OSMPBF_INCLUDE_DIR})
target_link_libraries(osrm-extract ${OSMPBF_LIBRARY})
target_link_libraries(osrm-prepare ${OSMPBF_LIBRARY})
find_package(Protobuf REQUIRED)
include_directories(${PROTOBUF_INCLUDE_DIRS})
target_link_libraries(osrm-extract ${PROTOBUF_LIBRARY})
target_link_libraries(osrm-prepare ${PROTOBUF_LIBRARY})
find_package(BZip2 REQUIRED)
include_directories(${BZIP_INCLUDE_DIRS})
target_link_libraries(osrm-extract ${BZIP2_LIBRARIES})
find_package(ZLIB REQUIRED)
add_dependency_includes(${ZLIB_INCLUDE_DIRS})
include_directories(${ZLIB_INCLUDE_DIRS})
target_link_libraries(osrm-extract ${ZLIB_LIBRARY})
target_link_libraries(osrm-routed ${ZLIB_LIBRARY})
add_dependency_defines(-DBOOST_SPIRIT_USE_PHOENIX_V3)
add_dependency_defines(-DBOOST_RESULT_OF_USE_DECLTYPE)
# Workaround for https://github.com/boostorg/phoenix/issues/111
add_dependency_defines(-DBOOST_PHOENIX_STL_TUPLE_H_)
add_definitions(${OSRM_DEFINES})
include_directories(SYSTEM ${DEPENDENCIES_INCLUDE_DIRS})
set(BOOST_BASE_LIBRARIES
${Boost_DATE_TIME_LIBRARY}
${Boost_IOSTREAMS_LIBRARY}
${Boost_THREAD_LIBRARY})
set(BOOST_ENGINE_LIBRARIES
${Boost_ZLIB_LIBRARY}
${Boost_REGEX_LIBRARY}
${BOOST_BASE_LIBRARIES})
# Binaries
target_link_libraries(osrm-datastore osrm_store ${Boost_PROGRAM_OPTIONS_LIBRARY})
target_link_libraries(osrm-extract osrm_extract ${Boost_PROGRAM_OPTIONS_LIBRARY})
target_link_libraries(osrm-partition osrm_partition ${Boost_PROGRAM_OPTIONS_LIBRARY})
target_link_libraries(osrm-customize osrm_customize ${Boost_PROGRAM_OPTIONS_LIBRARY})
target_link_libraries(osrm-contract osrm_contract ${Boost_PROGRAM_OPTIONS_LIBRARY})
if (BUILD_ROUTED)
target_link_libraries(osrm-routed osrm ${Boost_PROGRAM_OPTIONS_LIBRARY} ${OPTIONAL_SOCKET_LIBS} ${ZLIB_LIBRARY})
endif()
set(EXTRACTOR_LIBRARIES
${BZIP2_LIBRARIES}
${BOOST_BASE_LIBRARIES}
${CMAKE_THREAD_LIBS_INIT}
${EXPAT_LIBRARIES}
${LUA_LIBRARIES}
${OSMIUM_LIBRARIES}
${TBB_LIBRARIES}
${ZLIB_LIBRARY}
${MAYBE_COVERAGE_LIBRARIES})
set(GUIDANCE_LIBRARIES
${BOOST_BASE_LIBRARIES}
${CMAKE_THREAD_LIBS_INIT}
${LUA_LIBRARIES}
${TBB_LIBRARIES}
${MAYBE_COVERAGE_LIBRARIES})
set(PARTITIONER_LIBRARIES
${BOOST_ENGINE_LIBRARIES}
${CMAKE_THREAD_LIBS_INIT}
${TBB_LIBRARIES}
${MAYBE_RT_LIBRARY}
${MAYBE_COVERAGE_LIBRARIES}
${ZLIB_LIBRARY})
set(CUSTOMIZER_LIBRARIES
${BOOST_ENGINE_LIBRARIES}
${CMAKE_THREAD_LIBS_INIT}
${TBB_LIBRARIES}
${MAYBE_RT_LIBRARY}
${MAYBE_COVERAGE_LIBRARIES})
set(UPDATER_LIBRARIES
${BOOST_BASE_LIBRARIES}
${CMAKE_THREAD_LIBS_INIT}
${TBB_LIBRARIES}
${MAYBE_RT_LIBRARY}
${MAYBE_COVERAGE_LIBRARIES}
${ZLIB_LIBRARY})
set(CONTRACTOR_LIBRARIES
${BOOST_BASE_LIBRARIES}
${CMAKE_THREAD_LIBS_INIT}
${LUA_LIBRARIES}
${TBB_LIBRARIES}
${MAYBE_RT_LIBRARY}
${MAYBE_COVERAGE_LIBRARIES})
set(ENGINE_LIBRARIES
${BOOST_ENGINE_LIBRARIES}
${CMAKE_THREAD_LIBS_INIT}
${TBB_LIBRARIES}
${MAYBE_RT_LIBRARY}
${MAYBE_COVERAGE_LIBRARIES}
${ZLIB_LIBRARY})
set(STORAGE_LIBRARIES
${BOOST_BASE_LIBRARIES}
${CMAKE_THREAD_LIBS_INIT}
${TBB_LIBRARIES}
${MAYBE_RT_LIBRARY}
${MAYBE_COVERAGE_LIBRARIES})
set(UTIL_LIBRARIES
${BOOST_BASE_LIBRARIES}
${CMAKE_THREAD_LIBS_INIT}
${TBB_LIBRARIES}
${MAYBE_COVERAGE_LIBRARIES}
${ZLIB_LIBRARY})
# Libraries
target_link_libraries(osrm ${ENGINE_LIBRARIES})
target_link_libraries(osrm_update ${UPDATER_LIBRARIES})
target_link_libraries(osrm_contract ${CONTRACTOR_LIBRARIES} osrm_update osrm_store)
target_link_libraries(osrm_extract osrm_guidance ${EXTRACTOR_LIBRARIES})
target_link_libraries(osrm_partition ${PARTITIONER_LIBRARIES})
target_link_libraries(osrm_customize ${CUSTOMIZER_LIBRARIES} osrm_update osrm_store)
target_link_libraries(osrm_store ${STORAGE_LIBRARIES})
# BUILD_COMPONENTS
add_executable(osrm-components src/tools/components.cpp $<TARGET_OBJECTS:MICROTAR> $<TARGET_OBJECTS:UTIL>)
target_link_libraries(osrm-components ${TBB_LIBRARIES} ${BOOST_BASE_LIBRARIES} ${UTIL_LIBRARIES})
install(TARGETS osrm-components DESTINATION bin)
if(BUILD_TOOLS)
if(WITH_TOOLS)
message(STATUS "Activating OSRM internal tools")
add_executable(osrm-io-benchmark src/tools/io-benchmark.cpp $<TARGET_OBJECTS:UTIL>)
target_link_libraries(osrm-io-benchmark ${BOOST_BASE_LIBRARIES} ${TBB_LIBRARIES})
install(TARGETS osrm-io-benchmark DESTINATION bin)
find_package(GDAL)
if(GDAL_FOUND)
add_executable(osrm-components Tools/components.cpp)
include_directories(${GDAL_INCLUDE_DIR})
target_link_libraries(
osrm-components
${GDAL_LIBRARIES} ${Boost_LIBRARIES} UUID GITDESCRIPTION COORDLIB)
endif()
add_executable(osrm-cli Tools/simpleclient.cpp)
target_link_libraries(osrm-cli ${Boost_LIBRARIES} OSRM UUID GITDESCRIPTION)
add_executable(osrm-io-benchmark Tools/io-benchmark.cpp)
target_link_libraries(osrm-io-benchmark ${Boost_LIBRARIES} GITDESCRIPTION)
add_executable(osrm-unlock-all Tools/unlock_all_mutexes.cpp)
target_link_libraries(osrm-unlock-all ${Boost_LIBRARIES} GITDESCRIPTION)
if(UNIX AND NOT APPLE)
target_link_libraries(osrm-unlock-all rt)
endif()
endif()
if (ENABLE_ASSERTIONS)
message(STATUS "Enabling assertions")
add_definitions(-DBOOST_ENABLE_ASSERT_HANDLER)
endif()
if (ENABLE_DEBUG_LOGGING)
message(STATUS "Enabling debug logging")
add_definitions(-DENABLE_DEBUG_LOGGING)
endif()
file(GLOB InstallGlob Include/osrm/*.h Library/OSRM.h)
# Add RPATH info to executables so that when they are run after being installed
# (i.e., from /usr/local/bin/) the linker can find library dependencies. For
# more info see http://www.cmake.org/Wiki/CMake_RPATH_handling
set_property(TARGET osrm-extract PROPERTY INSTALL_RPATH_USE_LINK_PATH TRUE)
set_property(TARGET osrm-partition PROPERTY INSTALL_RPATH_USE_LINK_PATH TRUE)
set_property(TARGET osrm-contract PROPERTY INSTALL_RPATH_USE_LINK_PATH TRUE)
set_property(TARGET osrm-prepare PROPERTY INSTALL_RPATH_USE_LINK_PATH TRUE)
set_property(TARGET osrm-datastore PROPERTY INSTALL_RPATH_USE_LINK_PATH TRUE)
if (BUILD_ROUTED)
set_property(TARGET osrm-routed PROPERTY INSTALL_RPATH_USE_LINK_PATH TRUE)
endif()
set_property(TARGET osrm-routed PROPERTY INSTALL_RPATH_USE_LINK_PATH TRUE)
file(GLOB FlatbuffersGlob third_party/flatbuffers/include/flatbuffers/*.h)
file(GLOB LibraryGlob include/osrm/*.hpp)
file(GLOB ParametersGlob include/engine/api/*_parameters.hpp)
set(ApiHeader include/engine/api/base_result.hpp)
set(EngineHeader include/engine/status.hpp include/engine/engine_config.hpp include/engine/hint.hpp include/engine/bearing.hpp include/engine/approach.hpp include/engine/phantom_node.hpp)
set(UtilHeader include/util/coordinate.hpp include/util/json_container.hpp include/util/typedefs.hpp include/util/alias.hpp include/util/exception.hpp include/util/bearing.hpp)
set(ExtractorHeader include/extractor/extractor.hpp include/storage/io_config.hpp include/extractor/extractor_config.hpp include/extractor/travel_mode.hpp)
set(PartitionerHeader include/partitioner/partitioner.hpp include/partitioner/partitioner_config.hpp)
set(ContractorHeader include/contractor/contractor.hpp include/contractor/contractor_config.hpp)
set(StorageHeader include/storage/storage.hpp include/storage/io_config.hpp include/storage/storage_config.hpp)
install(FILES ${EngineHeader} DESTINATION include/osrm/engine)
install(FILES ${UtilHeader} DESTINATION include/osrm/util)
install(FILES ${StorageHeader} DESTINATION include/osrm/storage)
install(FILES ${ExtractorHeader} DESTINATION include/osrm/extractor)
install(FILES ${PartitionerHeader} DESTINATION include/osrm/partitioner)
install(FILES ${ContractorHeader} DESTINATION include/osrm/contractor)
install(FILES ${LibraryGlob} DESTINATION include/osrm)
install(FILES ${ParametersGlob} DESTINATION include/osrm/engine/api)
install(FILES ${ApiHeader} DESTINATION include/osrm/engine/api)
install(FILES ${FlatbuffersGlob} DESTINATION include/flatbuffers)
install(FILES ${InstallGlob} DESTINATION include/osrm)
install(TARGETS osrm-extract DESTINATION bin)
install(TARGETS osrm-partition DESTINATION bin)
install(TARGETS osrm-customize DESTINATION bin)
install(TARGETS osrm-contract DESTINATION bin)
install(TARGETS osrm-prepare DESTINATION bin)
install(TARGETS osrm-datastore DESTINATION bin)
if (BUILD_ROUTED)
install(TARGETS osrm-routed DESTINATION bin)
endif()
install(TARGETS osrm DESTINATION lib)
install(TARGETS osrm_extract DESTINATION lib)
install(TARGETS osrm_partition DESTINATION lib)
install(TARGETS osrm_customize DESTINATION lib)
install(TARGETS osrm_update DESTINATION lib)
install(TARGETS osrm_contract DESTINATION lib)
install(TARGETS osrm_store DESTINATION lib)
install(TARGETS osrm_guidance DESTINATION lib)
# Install profiles and support library to /usr/local/share/osrm/profiles by default
set(DefaultProfilesDir profiles)
install(DIRECTORY ${DefaultProfilesDir} DESTINATION share/osrm)
# Install data geojson files to /usr/local/share/osrm/data by default
set(DefaultProfilesDir data)
install(DIRECTORY ${DefaultProfilesDir} DESTINATION share/osrm)
# Setup exporting variables for pkgconfig and subproject
#
if(BUILD_PACKAGE)
include(CPackConfig)
include(CPack)
endif()
function(JOIN VALUES GLUE OUTPUT)
string (REPLACE ";" "${GLUE}" _TMP_STR "${VALUES}")
set (${OUTPUT} "${_TMP_STR}" PARENT_SCOPE)
endfunction()
JOIN("${OSRM_DEFINES}" " " TMP_OSRM_DEFINES)
set(LibOSRM_CXXFLAGS "${OSRM_CXXFLAGS} ${TMP_OSRM_DEFINES}")
set(LibOSRM_LDFLAGS "${OSRM_LDFLAGS}")
if(BUILD_AS_SUBPROJECT)
set(LibOSRM_CXXFLAGS "${LibOSRM_CXXFLAGS}" PARENT_SCOPE)
set(LibOSRM_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/include" PARENT_SCOPE)
set(LibOSRM_LIBRARY_DIR "${CMAKE_CURRENT_BINARY_DIR}" PARENT_SCOPE)
set(LibOSRM_LIBRARIES "osrm" PARENT_SCOPE)
set(LibOSRM_DEPENDENT_LIBRARIES "${ENGINE_LIBRARIES}" PARENT_SCOPE)
set(LibOSRM_INCLUDE_DIRS "${CMAKE_CURRENT_SOURCE_DIR}/include"
"${CMAKE_CURRENT_SOURCE_DIR}/include/osrm"
"${CMAKE_CURRENT_SOURCE_DIR}/third_party"
"${DEPENDENCIES_INCLUDE_DIRS}" PARENT_SCOPE)
set(LibOSRM_LIBRARY_DIRS "${LibOSRM_LIBRARY_DIR}" PARENT_SCOPE)
endif()
# pkgconfig defines
set(PKGCONFIG_OSRM_CXXFLAGS "${LibOSRM_CXXFLAGS}")
set(PKGCONFIG_OSRM_LDFLAGS "${LibOSRM_LDFLAGS}")
set(PKGCONFIG_LIBRARY_DIR "${CMAKE_INSTALL_PREFIX}/lib")
set(PKGCONFIG_INCLUDE_DIR "${CMAKE_INSTALL_PREFIX}/include")
list(APPEND DEPENDENCIES_INCLUDE_DIRS "${PKGCONFIG_INCLUDE_DIR}")
list(APPEND DEPENDENCIES_INCLUDE_DIRS "${PKGCONFIG_INCLUDE_DIR}/osrm")
JOIN("-I${DEPENDENCIES_INCLUDE_DIRS}" " -I" PKGCONFIG_OSRM_INCLUDE_FLAGS)
# Boost uses imported targets, so we need a generator expression to extract
# the link libraries that get written to the pkg-config file.
# Conan & TBB define their dependencies as CMake targets too, so we treat them the same way.
foreach(engine_lib ${ENGINE_LIBRARIES})
if("${engine_lib}" MATCHES "^Boost.*" OR "${engine_lib}" MATCHES "^CONAN_LIB.*" OR "${engine_lib}" MATCHES "^TBB.*")
list(APPEND PKGCONFIG_DEPENDENT_LIBRARIES "$<TARGET_LINKER_FILE:${engine_lib}>")
else()
list(APPEND PKGCONFIG_DEPENDENT_LIBRARIES "${engine_lib}")
endif()
endforeach(engine_lib)
JOIN("${PKGCONFIG_DEPENDENT_LIBRARIES}" " " PKGCONFIG_OSRM_DEPENDENT_LIBRARIES)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cmake/pkgconfig.in pkgconfig.configured @ONLY)
file(GENERATE
OUTPUT
${PROJECT_BINARY_DIR}/libosrm.pc
INPUT
${PROJECT_BINARY_DIR}/pkgconfig.configured)
install(FILES ${PROJECT_BINARY_DIR}/libosrm.pc DESTINATION ${PKGCONFIG_LIBRARY_DIR}/pkgconfig)
# uninstall target
configure_file(
"${CMAKE_CURRENT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in"
"${CMAKE_CURRENT_BINARY_DIR}/cmake/cmake_uninstall.cmake"
IMMEDIATE @ONLY)
add_custom_target(uninstall
COMMAND ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake/cmake_uninstall.cmake)
# Modular build system: each directory registered here provides its own CMakeLists.txt
add_subdirectory(unit_tests)
add_subdirectory(src/benchmarks)
if (ENABLE_NODE_BINDINGS)
add_subdirectory(src/nodejs)
endif()
if (ENABLE_FUZZING)
    # Requires libosrm to be built with sanitizers; make this configurable and default to ubsan
set(FUZZ_SANITIZER "undefined" CACHE STRING "Sanitizer to be used for Fuzz testing")
set_property(CACHE FUZZ_SANITIZER PROPERTY STRINGS "undefined" "integer" "address" "memory" "thread" "leak")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize-coverage=edge,indirect-calls,8bit-counters -fsanitize=address")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address")
set(OSRM_LDFLAGS "${OSRM_LDFLAGS} -fsanitize=address")
message(STATUS "Using -fsanitize=${FUZZ_SANITIZER} for Fuzz testing")
add_subdirectory(fuzz)
endif ()
# add headers sanity check target that includes all headers independently
set(check_headers_dir "${PROJECT_BINARY_DIR}/check-headers")
file(GLOB_RECURSE headers_to_check
${PROJECT_BINARY_DIR}/*.hpp
${PROJECT_SOURCE_DIR}/include/*.hpp)
foreach(header ${headers_to_check})
if ("${header}" MATCHES ".*/include/nodejs/.*")
# we do not check NodeJS bindings headers
continue()
endif()
get_filename_component(filename ${header} NAME_WE)
set(filename "${check_headers_dir}/${filename}.cpp")
if (NOT EXISTS ${filename})
file(WRITE ${filename} "#include \"${header}\"\n")
endif()
list(APPEND sources ${filename})
endforeach()
add_library(check-headers STATIC EXCLUDE_FROM_ALL ${sources})
set_target_properties(check-headers PROPERTIES ARCHIVE_OUTPUT_DIRECTORY ${check_headers_dir})
install(TARGETS osrm-routed DESTINATION bin)
install(TARGETS OSRM DESTINATION lib)
list(GET Boost_LIBRARIES 1 BOOST_LIBRARY_FIRST)
get_filename_component(BOOST_LIBRARY_LISTING "${BOOST_LIBRARY_FIRST}" PATH)
set(BOOST_LIBRARY_LISTING "-L${BOOST_LIBRARY_LISTING}")
foreach (lib ${Boost_LIBRARIES})
get_filename_component(BOOST_LIBRARY_NAME "${lib}" NAME_WE)
string(REPLACE "lib" "" BOOST_LIBRARY_NAME ${BOOST_LIBRARY_NAME})
set(BOOST_LIBRARY_LISTING "${BOOST_LIBRARY_LISTING} -l${BOOST_LIBRARY_NAME}")
endforeach ()
configure_file(${CMAKE_SOURCE_DIR}/cmake/pkgconfig.in libosrm.pc @ONLY)
install(FILES ${PROJECT_BINARY_DIR}/libosrm.pc DESTINATION lib/pkgconfig)

View File

@ -1,3 +0,0 @@
# Code of conduct
Everyone is invited to participate in Project OSRM's open source projects and public discussions: we want to create a welcoming and friendly environment. Harassment of participants or other unethical and unprofessional behavior will not be tolerated in our spaces. The [Contributor Covenant](http://contributor-covenant.org) applies to all projects under the Project-OSRM organization and we ask that you please read [the full text](http://contributor-covenant.org/version/1/2/0/).

View File

@ -1,81 +0,0 @@
# Everyone
Please take some time to review our [code of conduct](CODE-OF-CONDUCT.md) to help guide your interactions with others on this project.
# User
Before you open a new issue, please search for older ones that cover the same issue.
In general "me too" comments/issues are frowned upon.
You can add a :+1: emoji reaction to the issue if you want to express interest in this.
# Developer
We use `clang-format` version `15` to consistently format the code base. There is a helper script under `scripts/format.sh`.
The format is automatically checked by the `mason-linux-release` job of a Travis CI build.
To save development time, a local hook `.git/hooks/pre-push` such as the following
```
#!/bin/sh

remote="$1"

if [ x"$remote" = xorigin ] ; then
    if [ $(git rev-parse --abbrev-ref HEAD) = master ] ; then
        echo "Rejected push to $remote/master" ; exit 1
    fi

    ./scripts/format.sh && ./scripts/error_on_dirty.sh
    if [ $? -ne 0 ] ; then
        echo "Unstaged format changes" ; exit 1
    fi
fi
```
can check the code format, modify the local repository, and reject the push when formatting changes are left unstaged.
The `pre-push` hook also rejects direct pushes to `origin/master`.
⚠️ `scripts/format.sh` checks all local files that match `*.cpp` or `*.hpp` patterns.
In general changes that affect the API and/or increase the memory consumption need to be discussed first.
We often don't include changes that would increase memory consumption significantly if they are not generally useful (elevation data is a good example).
## Pull Request
Every pull-request that changes the API needs to update the docs in `docs/http.md` and add an entry to `CHANGELOG.md`.
Breaking changes need to have a BREAKING prefix. See the [releasing documentation](docs/releasing.md) on how this affects the version.
Early feedback is also important.
You will see that a lot of the PR have tags like `[not ready]` or `[wip]`.
We like to open PRs as soon as we are starting to work on something to make it visible to the rest of the team.
If your work is going in entirely the wrong direction, there is a good chance someone will pick up on this before it is too late.
Everyone is encouraged to read PRs of other people and give feedback.
For every significant code change we require a pull request review before it is merged.
If your pull request modifies the API, this needs to be signed off in a team discussion.
This means you will need to find another member of the team with commit access and request a review of your pull request.
Once your pull request is reviewed you can merge it! If you don't have commit access, ping someone who has.
If you do have commit access there are in general two accepted styles to merging:
1. Make sure the branch is up to date with `master`. Run `git rebase master` to find out.
2. Once that is ensured you can either:
- Click the nice green merge button (for a non-fast-forward merge)
- Merge by hand using a fast-forward merge
Which merge style you use is a matter of personal preference. In general, fast-forward merges are recommended because they create a sequential history that is easier to understand; a sketch of that workflow follows below.
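A minimal sketch of the fast-forward variant, assuming the reviewed branch is called `my-feature` (the branch name is only a placeholder):
```
git checkout master
git pull origin master            # make sure local master matches origin
git merge --ff-only my-feature    # refuses to create a merge commit if it cannot fast-forward
git push origin master
```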
# Maintainer
## Doing a release
There is an in-depth guide around how to push out a release once it is ready [here](docs/releasing.md).
## The API
Changes to the API need to be discussed and signed off by the team. Breaking changes even more so than additive changes.
## Milestones
Whether a pull request or an issue is applicable for the current or the next milestone depends on the target version number.
Since we use semantic versioning we restrict breaking changes to major releases.
After a Release Candidate has been released we usually don't change the API anymore unless it is critical.
Bigger code changes after an RC has been released should also be avoided.

963
Contractor/Contractor.h Normal file
View File

@ -0,0 +1,963 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CONTRACTOR_H
#define CONTRACTOR_H
#include "TemporaryStorage.h"
#include "../DataStructures/BinaryHeap.h"
#include "../DataStructures/DeallocatingVector.h"
#include "../DataStructures/DynamicGraph.h"
#include "../DataStructures/Percent.h"
#include "../DataStructures/XORFastHash.h"
#include "../DataStructures/XORFastHashStorage.h"
#include "../Util/OpenMPWrapper.h"
#include "../Util/SimpleLogger.h"
#include "../Util/StringUtil.h"
#include <boost/assert.hpp>
#include <algorithm>
#include <limits>
#include <vector>
class Contractor
{
private:
struct ContractorEdgeData
{
ContractorEdgeData()
: distance(0), id(0), originalEdges(0), shortcut(0), forward(0), backward(0),
is_original_via_node_ID(false)
{
}
ContractorEdgeData(unsigned _distance,
unsigned _originalEdges,
unsigned _id,
bool _shortcut,
bool _forward,
bool _backward)
: distance(_distance), id(_id),
originalEdges(std::min((unsigned)1 << 28, _originalEdges)), shortcut(_shortcut),
forward(_forward), backward(_backward), is_original_via_node_ID(false)
{
}
unsigned distance;
unsigned id;
unsigned originalEdges : 28;
bool shortcut : 1;
bool forward : 1;
bool backward : 1;
bool is_original_via_node_ID : 1;
} data;
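// Layout note: the four one-bit flags and the 28-bit originalEdges counter above
// share a single 32-bit word, so a ContractorEdgeData typically occupies twelve
// bytes (distance, id, packed flag word). Keeping it small matters because the
// constructor below materialises two directed copies of every input edge.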
struct ContractorHeapData
{
short hop;
bool target;
ContractorHeapData() : hop(0), target(false) {}
ContractorHeapData(short h, bool t) : hop(h), target(t) {}
};
typedef DynamicGraph<ContractorEdgeData> ContractorGraph;
// typedef BinaryHeap< NodeID, NodeID, int, ContractorHeapData, ArrayStorage<NodeID, NodeID>
// > ContractorHeap;
typedef BinaryHeap<NodeID, NodeID, int, ContractorHeapData, XORFastHashStorage<NodeID, NodeID>>
ContractorHeap;
typedef ContractorGraph::InputEdge ContractorEdge;
struct ContractorThreadData
{
ContractorHeap heap;
std::vector<ContractorEdge> inserted_edges;
std::vector<NodeID> neighbours;
ContractorThreadData(NodeID nodes) : heap(nodes) {}
};
struct NodePriorityData
{
int depth;
NodePriorityData() : depth(0) {}
};
struct ContractionStats
{
int edges_deleted_count;
int edges_added_count;
int original_edges_deleted_count;
int original_edges_added_count;
ContractionStats()
: edges_deleted_count(0), edges_added_count(0), original_edges_deleted_count(0),
original_edges_added_count(0)
{
}
};
struct RemainingNodeData
{
RemainingNodeData() : id(0), is_independent(false) {}
NodeID id : 31;
bool is_independent : 1;
};
public:
template <class ContainerT> Contractor(int nodes, ContainerT &input_edge_list)
{
std::vector<ContractorEdge> edges;
edges.reserve(input_edge_list.size() * 2);
temp_edge_counter = 0;
auto diter = input_edge_list.dbegin();
auto dend = input_edge_list.dend();
ContractorEdge new_edge;
while (diter != dend)
{
new_edge.source = diter->source();
new_edge.target = diter->target();
new_edge.data = ContractorEdgeData((std::max)((int)diter->weight(), 1),
1,
diter->id(),
false,
diter->isForward(),
diter->isBackward());
BOOST_ASSERT_MSG(new_edge.data.distance > 0, "edge distance < 1");
#ifndef NDEBUG
if (new_edge.data.distance > 24 * 60 * 60 * 10)
{
SimpleLogger().Write(logWARNING) << "Edge weight large -> "
<< new_edge.data.distance;
}
#endif
edges.push_back(new_edge);
std::swap(new_edge.source, new_edge.target);
new_edge.data.forward = diter->isBackward();
new_edge.data.backward = diter->isForward();
edges.push_back(new_edge);
++diter;
}
// clear input vector and trim the current set of edges with the well-known swap trick
input_edge_list.clear();
sort(edges.begin(), edges.end());
NodeID edge = 0;
for (NodeID i = 0; i < edges.size();)
{
const NodeID source = edges[i].source;
const NodeID target = edges[i].target;
const NodeID id = edges[i].data.id;
// remove eigenloops
if (source == target)
{
i++;
continue;
}
ContractorEdge forward_edge;
ContractorEdge reverse_edge;
forward_edge.source = reverse_edge.source = source;
forward_edge.target = reverse_edge.target = target;
forward_edge.data.forward = reverse_edge.data.backward = true;
forward_edge.data.backward = reverse_edge.data.forward = false;
forward_edge.data.shortcut = reverse_edge.data.shortcut = false;
forward_edge.data.id = reverse_edge.data.id = id;
forward_edge.data.originalEdges = reverse_edge.data.originalEdges = 1;
forward_edge.data.distance = reverse_edge.data.distance =
std::numeric_limits<int>::max();
// remove parallel edges
while (i < edges.size() && edges[i].source == source && edges[i].target == target)
{
if (edges[i].data.forward)
{
forward_edge.data.distance =
std::min(edges[i].data.distance, forward_edge.data.distance);
}
if (edges[i].data.backward)
{
reverse_edge.data.distance =
std::min(edges[i].data.distance, reverse_edge.data.distance);
}
++i;
}
// merge edges (s,t) and (t,s) into bidirectional edge
if (forward_edge.data.distance == reverse_edge.data.distance)
{
if ((int)forward_edge.data.distance != std::numeric_limits<int>::max())
{
forward_edge.data.backward = true;
edges[edge++] = forward_edge;
}
}
else
{ // insert separate edges
if (((int)forward_edge.data.distance) != std::numeric_limits<int>::max())
{
edges[edge++] = forward_edge;
}
if ((int)reverse_edge.data.distance != std::numeric_limits<int>::max())
{
edges[edge++] = reverse_edge;
}
}
}
std::cout << "merged " << edges.size() - edge << " edges out of " << edges.size()
<< std::endl;
edges.resize(edge);
contractor_graph = std::make_shared<ContractorGraph>(nodes, edges);
edges.clear();
edges.shrink_to_fit();
BOOST_ASSERT(0 == edges.capacity());
// unsigned maxdegree = 0;
// NodeID highestNode = 0;
//
// for(unsigned i = 0; i < contractor_graph->GetNumberOfNodes(); ++i) {
// unsigned degree = contractor_graph->EndEdges(i) -
// contractor_graph->BeginEdges(i);
// if(degree > maxdegree) {
// maxdegree = degree;
// highestNode = i;
// }
// }
//
// SimpleLogger().Write() << "edges at node with id " << highestNode << " has degree
// " << maxdegree;
// for(unsigned i = contractor_graph->BeginEdges(highestNode); i <
// contractor_graph->EndEdges(highestNode); ++i) {
// SimpleLogger().Write() << " ->(" << highestNode << "," <<
// contractor_graph->GetTarget(i)
// << "); via: " << contractor_graph->GetEdgeData(i).via;
// }
// Create temporary file
edge_storage_slot = TemporaryStorage::GetInstance().AllocateSlot();
std::cout << "contractor finished initalization" << std::endl;
}
~Contractor() { TemporaryStorage::GetInstance().DeallocateSlot(edge_storage_slot); }
void Run()
{
const NodeID number_of_nodes = contractor_graph->GetNumberOfNodes();
Percent p(number_of_nodes);
const unsigned thread_count = omp_get_max_threads();
std::vector<ContractorThreadData *> thread_data_list;
for (unsigned thread_id = 0; thread_id < thread_count; ++thread_id)
{
thread_data_list.push_back(new ContractorThreadData(number_of_nodes));
}
std::cout << "Contractor is using " << thread_count << " threads" << std::endl;
NodeID number_of_contracted_nodes = 0;
std::vector<RemainingNodeData> remaining_nodes(number_of_nodes);
std::vector<float> node_priorities(number_of_nodes);
std::vector<NodePriorityData> node_data(number_of_nodes);
// initialize priorities in parallel
#pragma omp parallel for schedule(guided)
for (int x = 0; x < (int)number_of_nodes; ++x)
{
remaining_nodes[x].id = x;
}
std::cout << "initializing elimination PQ ..." << std::flush;
#pragma omp parallel
{
ContractorThreadData *data = thread_data_list[omp_get_thread_num()];
#pragma omp for schedule(guided)
for (int x = 0; x < (int)number_of_nodes; ++x)
{
node_priorities[x] = EvaluateNodePriority(data, &node_data[x], x);
}
}
std::cout << "ok" << std::endl << "preprocessing " << number_of_nodes << " nodes ..."
<< std::flush;
bool flushed_contractor = false;
while (number_of_nodes > 2 && number_of_contracted_nodes < number_of_nodes)
{
if (!flushed_contractor && (number_of_contracted_nodes > (number_of_nodes * 0.65)))
{
DeallocatingVector<ContractorEdge> new_edge_set; // this one is not explicitly
// cleared since it goes out of
// scope anyway
std::cout << " [flush " << number_of_contracted_nodes << " nodes] " << std::flush;
// Delete old heap data to free memory that we need for the coming operations
for (ContractorThreadData *data : thread_data_list)
{
delete data;
}
thread_data_list.clear();
// Create new priority array
std::vector<float> new_node_priority(remaining_nodes.size());
// this map gives the old IDs from the new ones, necessary to get a consistent graph
// at the end of contraction
orig_node_id_to_new_id_map.resize(remaining_nodes.size());
// this map gives the new IDs from the old ones, necessary to remap targets from the
// remaining graph
std::vector<NodeID> new_node_id_from_orig_id_map(number_of_nodes, UINT_MAX);
// build forward and backward renumbering map and remap ids in remaining_nodes and
// Priorities.
for (unsigned new_node_id = 0; new_node_id < remaining_nodes.size(); ++new_node_id)
{
// create renumbering maps in both directions
orig_node_id_to_new_id_map[new_node_id] = remaining_nodes[new_node_id].id;
new_node_id_from_orig_id_map[remaining_nodes[new_node_id].id] = new_node_id;
new_node_priority[new_node_id] =
node_priorities[remaining_nodes[new_node_id].id];
remaining_nodes[new_node_id].id = new_node_id;
}
TemporaryStorage &temporary_storage = TemporaryStorage::GetInstance();
// walk over all nodes
for (unsigned i = 0; i < contractor_graph->GetNumberOfNodes(); ++i)
{
const NodeID start = i;
for (auto current_edge : contractor_graph->GetAdjacentEdgeRange(start))
{
ContractorGraph::EdgeData &data =
contractor_graph->GetEdgeData(current_edge);
const NodeID target = contractor_graph->GetTarget(current_edge);
if (UINT_MAX == new_node_id_from_orig_id_map[i])
{
// Save edges of this node w/o renumbering.
temporary_storage.WriteToSlot(
edge_storage_slot, (char *)&start, sizeof(NodeID));
temporary_storage.WriteToSlot(
edge_storage_slot, (char *)&target, sizeof(NodeID));
temporary_storage.WriteToSlot(edge_storage_slot,
(char *)&data,
sizeof(ContractorGraph::EdgeData));
++temp_edge_counter;
}
else
{
// node is not yet contracted.
// add (renumbered) outgoing edges to new DynamicGraph.
ContractorEdge new_edge;
new_edge.source = new_node_id_from_orig_id_map[start];
new_edge.target = new_node_id_from_orig_id_map[target];
new_edge.data = data;
new_edge.data.is_original_via_node_ID = true;
BOOST_ASSERT_MSG(UINT_MAX != new_node_id_from_orig_id_map[start],
"new start id not resolveable");
BOOST_ASSERT_MSG(UINT_MAX != new_node_id_from_orig_id_map[target],
"new target id not resolveable");
new_edge_set.push_back(new_edge);
}
}
}
// Delete map from old NodeIDs to new ones.
new_node_id_from_orig_id_map.clear();
new_node_id_from_orig_id_map.shrink_to_fit();
// Replace old priorities array by new one
node_priorities.swap(new_node_priority);
// Delete old node_priorities vector
std::vector<float>().swap(new_node_priority);
// old Graph is removed
contractor_graph.reset();
// create new graph
std::sort(new_edge_set.begin(), new_edge_set.end());
contractor_graph =
std::make_shared<ContractorGraph>(remaining_nodes.size(), new_edge_set);
new_edge_set.clear();
flushed_contractor = true;
// INFO: MAKE SURE THIS IS THE LAST OPERATION OF THE FLUSH!
// reinitialize heaps and ThreadData objects with appropriate size
for (unsigned thread_id = 0; thread_id < thread_count; ++thread_id)
{
thread_data_list.push_back(
new ContractorThreadData(contractor_graph->GetNumberOfNodes()));
}
}
const int last = (int)remaining_nodes.size();
#pragma omp parallel
{
// determine independent node set
ContractorThreadData *const data = thread_data_list[omp_get_thread_num()];
#pragma omp for schedule(guided)
for (int i = 0; i < last; ++i)
{
const NodeID node = remaining_nodes[i].id;
remaining_nodes[i].is_independent =
IsNodeIndependent(node_priorities, data, node);
}
}
const auto first = stable_partition(remaining_nodes.begin(),
remaining_nodes.end(),
[](RemainingNodeData node_data)
{ return !node_data.is_independent; });
const int first_independent_node = first - remaining_nodes.begin();
// contract independent nodes
#pragma omp parallel
{
ContractorThreadData *data = thread_data_list[omp_get_thread_num()];
#pragma omp for schedule(guided) nowait
for (int position = first_independent_node; position < last; ++position)
{
NodeID x = remaining_nodes[position].id;
ContractNode<false>(data, x);
}
std::sort(data->inserted_edges.begin(), data->inserted_edges.end());
}
#pragma omp parallel
{
ContractorThreadData *data = thread_data_list[omp_get_thread_num()];
#pragma omp for schedule(guided) nowait
for (int position = first_independent_node; position < last; ++position)
{
NodeID x = remaining_nodes[position].id;
DeleteIncomingEdges(data, x);
}
}
// insert new edges
for (unsigned thread_id = 0; thread_id < thread_count; ++thread_id)
{
ContractorThreadData &data = *thread_data_list[thread_id];
for (const ContractorEdge &edge : data.inserted_edges)
{
auto current_edge_ID = contractor_graph->FindEdge(edge.source, edge.target);
if (current_edge_ID < contractor_graph->EndEdges(edge.source))
{
ContractorGraph::EdgeData &current_data =
contractor_graph->GetEdgeData(current_edge_ID);
if (current_data.shortcut && edge.data.forward == current_data.forward &&
edge.data.backward == current_data.backward &&
edge.data.distance < current_data.distance)
{
// found a duplicate edge with smaller weight, update it.
current_data = edge.data;
continue;
}
}
contractor_graph->InsertEdge(edge.source, edge.target, edge.data);
}
data.inserted_edges.clear();
}
// update priorities
#pragma omp parallel
{
ContractorThreadData *data = thread_data_list[omp_get_thread_num()];
#pragma omp for schedule(guided) nowait
for (int position = first_independent_node; position < last; ++position)
{
NodeID x = remaining_nodes[position].id;
UpdateNodeNeighbours(node_priorities, node_data, data, x);
}
}
// remove contracted nodes from the pool
number_of_contracted_nodes += last - first_independent_node;
remaining_nodes.resize(first_independent_node);
std::vector<RemainingNodeData>(remaining_nodes).swap(remaining_nodes);
// unsigned maxdegree = 0;
// unsigned avgdegree = 0;
// unsigned mindegree = UINT_MAX;
// unsigned quaddegree = 0;
//
// for(unsigned i = 0; i < remaining_nodes.size(); ++i) {
// unsigned degree = contractor_graph->EndEdges(remaining_nodes[i].first)
// -
// contractor_graph->BeginEdges(remaining_nodes[i].first);
// if(degree > maxdegree)
// maxdegree = degree;
// if(degree < mindegree)
// mindegree = degree;
//
// avgdegree += degree;
// quaddegree += (degree*degree);
// }
//
// avgdegree /= std::max((unsigned)1,(unsigned)remaining_nodes.size() );
// quaddegree /= std::max((unsigned)1,(unsigned)remaining_nodes.size() );
//
// SimpleLogger().Write() << "rest: " << remaining_nodes.size() << ", max: "
// << maxdegree << ", min: " << mindegree << ", avg: " << avgdegree << ",
// quad: " << quaddegree;
p.printStatus(number_of_contracted_nodes);
}
for (ContractorThreadData *data : thread_data_list)
{
delete data;
}
thread_data_list.clear();
}
template <class Edge> inline void GetEdges(DeallocatingVector<Edge> &edges)
{
Percent p(contractor_graph->GetNumberOfNodes());
SimpleLogger().Write() << "Getting edges of minimized graph";
NodeID number_of_nodes = contractor_graph->GetNumberOfNodes();
if (contractor_graph->GetNumberOfNodes())
{
Edge new_edge;
for (NodeID node = 0; node < number_of_nodes; ++node)
{
p.printStatus(node);
for (auto edge : contractor_graph->GetAdjacentEdgeRange(node))
{
const NodeID target = contractor_graph->GetTarget(edge);
const ContractorGraph::EdgeData &data = contractor_graph->GetEdgeData(edge);
if (!orig_node_id_to_new_id_map.empty())
{
new_edge.source = orig_node_id_to_new_id_map[node];
new_edge.target = orig_node_id_to_new_id_map[target];
}
else
{
new_edge.source = node;
new_edge.target = target;
}
BOOST_ASSERT_MSG(UINT_MAX != new_edge.source, "Source id invalid");
BOOST_ASSERT_MSG(UINT_MAX != new_edge.target, "Target id invalid");
new_edge.data.distance = data.distance;
new_edge.data.shortcut = data.shortcut;
if (!data.is_original_via_node_ID && !orig_node_id_to_new_id_map.empty())
{
new_edge.data.id = orig_node_id_to_new_id_map[data.id];
}
else
{
new_edge.data.id = data.id;
}
BOOST_ASSERT_MSG(new_edge.data.id != INT_MAX, // 2^31
"edge id invalid");
new_edge.data.forward = data.forward;
new_edge.data.backward = data.backward;
edges.push_back(new_edge);
}
}
}
contractor_graph.reset();
orig_node_id_to_new_id_map.clear();
orig_node_id_to_new_id_map.shrink_to_fit();
BOOST_ASSERT(0 == orig_node_id_to_new_id_map.capacity());
TemporaryStorage &temporary_storage = TemporaryStorage::GetInstance();
// loads edges of graph before renumbering, no need for further numbering action.
NodeID start;
NodeID target;
ContractorGraph::EdgeData data;
Edge restored_edge;
for (unsigned i = 0; i < temp_edge_counter; ++i)
{
temporary_storage.ReadFromSlot(edge_storage_slot, (char *)&start, sizeof(NodeID));
temporary_storage.ReadFromSlot(edge_storage_slot, (char *)&target, sizeof(NodeID));
temporary_storage.ReadFromSlot(
edge_storage_slot, (char *)&data, sizeof(ContractorGraph::EdgeData));
restored_edge.source = start;
restored_edge.target = target;
restored_edge.data.distance = data.distance;
restored_edge.data.shortcut = data.shortcut;
restored_edge.data.id = data.id;
restored_edge.data.forward = data.forward;
restored_edge.data.backward = data.backward;
edges.push_back(restored_edge);
}
temporary_storage.DeallocateSlot(edge_storage_slot);
}
private:
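// The bounded Dijkstra below is the witness search of the contraction step:
// starting from the source of an incoming edge it tries to reach the targets of
// the outgoing edges without passing through middleNode and without exceeding
// max_distance. Whenever such a witness path exists, ContractNode() can skip the
// corresponding shortcut. The search gives up after maxNodes settled nodes
// (1000 while simulating, 2000 while actually contracting) to bound the
// preprocessing time.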
inline void Dijkstra(const int max_distance,
const unsigned number_of_targets,
const int maxNodes,
ContractorThreadData *const data,
const NodeID middleNode)
{
ContractorHeap &heap = data->heap;
int nodes = 0;
unsigned number_of_targets_found = 0;
while (heap.Size() > 0)
{
const NodeID node = heap.DeleteMin();
const int distance = heap.GetKey(node);
const short current_hop = heap.GetData(node).hop + 1;
if (++nodes > maxNodes)
{
return;
}
// Destination settled?
if (distance > max_distance)
{
return;
}
if (heap.GetData(node).target)
{
++number_of_targets_found;
if (number_of_targets_found >= number_of_targets)
{
return;
}
}
// iterate over all edges of node
for (auto edge : contractor_graph->GetAdjacentEdgeRange(node))
{
const ContractorEdgeData &data = contractor_graph->GetEdgeData(edge);
if (!data.forward)
{
continue;
}
const NodeID to = contractor_graph->GetTarget(edge);
if (middleNode == to)
{
continue;
}
const int to_distance = distance + data.distance;
// New Node discovered -> Add to Heap + Node Info Storage
if (!heap.WasInserted(to))
{
heap.Insert(to, to_distance, ContractorHeapData(current_hop, false));
}
// Found a shorter Path -> Update distance
else if (to_distance < heap.GetKey(to))
{
heap.DecreaseKey(to, to_distance);
heap.GetData(to).hop = current_hop;
}
}
}
}
inline float EvaluateNodePriority(ContractorThreadData *const data,
NodePriorityData *const node_data,
const NodeID node)
{
ContractionStats stats;
// perform simulated contraction
ContractNode<true>(data, node, &stats);
// Result will contain the priority
float result;
if (0 == (stats.edges_deleted_count * stats.original_edges_deleted_count))
{
result = 1 * node_data->depth;
}
else
{
result = 2 * (((float)stats.edges_added_count) / stats.edges_deleted_count) +
4 * (((float)stats.original_edges_added_count) /
stats.original_edges_deleted_count) +
1 * node_data->depth;
}
BOOST_ASSERT(result >= 0);
return result;
}
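// Illustrative example of the priority formula above (numbers are made up):
// a simulated contraction that would delete 4 edges carrying 8 original edges
// and add 6 shortcut edges carrying 12 original edges, for a node at depth 2,
// yields
//     2 * (6 / 4.0) + 4 * (12 / 8.0) + 1 * 2 = 3 + 6 + 2 = 11
// Lower values are contracted earlier, so nodes whose contraction adds little
// compared to what it removes are preferred.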
template <bool RUNSIMULATION>
inline bool
ContractNode(ContractorThreadData *data, NodeID node, ContractionStats *stats = NULL)
{
ContractorHeap &heap = data->heap;
int inserted_edges_size = data->inserted_edges.size();
std::vector<ContractorEdge> &inserted_edges = data->inserted_edges;
for (auto in_edge : contractor_graph->GetAdjacentEdgeRange(node))
{
const ContractorEdgeData &in_data = contractor_graph->GetEdgeData(in_edge);
const NodeID source = contractor_graph->GetTarget(in_edge);
if (RUNSIMULATION)
{
BOOST_ASSERT(stats != NULL);
++stats->edges_deleted_count;
stats->original_edges_deleted_count += in_data.originalEdges;
}
if (!in_data.backward)
{
continue;
}
heap.Clear();
heap.Insert(source, 0, ContractorHeapData());
int max_distance = 0;
unsigned number_of_targets = 0;
for (auto out_edge : contractor_graph->GetAdjacentEdgeRange(node))
{
const ContractorEdgeData &out_data = contractor_graph->GetEdgeData(out_edge);
if (!out_data.forward)
{
continue;
}
const NodeID target = contractor_graph->GetTarget(out_edge);
const int path_distance = in_data.distance + out_data.distance;
max_distance = std::max(max_distance, path_distance);
if (!heap.WasInserted(target))
{
heap.Insert(target, INT_MAX, ContractorHeapData(0, true));
++number_of_targets;
}
}
if (RUNSIMULATION)
{
Dijkstra(max_distance, number_of_targets, 1000, data, node);
}
else
{
Dijkstra(max_distance, number_of_targets, 2000, data, node);
}
for (auto out_edge : contractor_graph->GetAdjacentEdgeRange(node))
{
const ContractorEdgeData &out_data = contractor_graph->GetEdgeData(out_edge);
if (!out_data.forward)
{
continue;
}
const NodeID target = contractor_graph->GetTarget(out_edge);
const int path_distance = in_data.distance + out_data.distance;
const int distance = heap.GetKey(target);
if (path_distance < distance)
{
if (RUNSIMULATION)
{
BOOST_ASSERT(stats != NULL);
stats->edges_added_count += 2;
stats->original_edges_added_count +=
2 * (out_data.originalEdges + in_data.originalEdges);
}
else
{
ContractorEdge new_edge;
new_edge.source = source;
new_edge.target = target;
new_edge.data =
ContractorEdgeData(path_distance,
out_data.originalEdges + in_data.originalEdges,
node /*, 0, in_data.turnInstruction*/,
true,
true,
false);
;
inserted_edges.push_back(new_edge);
std::swap(new_edge.source, new_edge.target);
new_edge.data.forward = false;
new_edge.data.backward = true;
inserted_edges.push_back(new_edge);
}
}
}
}
if (!RUNSIMULATION)
{
int iend = inserted_edges.size();
for (int i = inserted_edges_size; i < iend; ++i)
{
bool found = false;
for (int other = i + 1; other < iend; ++other)
{
if (inserted_edges[other].source != inserted_edges[i].source)
{
continue;
}
if (inserted_edges[other].target != inserted_edges[i].target)
{
continue;
}
if (inserted_edges[other].data.distance != inserted_edges[i].data.distance)
{
continue;
}
if (inserted_edges[other].data.shortcut != inserted_edges[i].data.shortcut)
{
continue;
}
inserted_edges[other].data.forward |= inserted_edges[i].data.forward;
inserted_edges[other].data.backward |= inserted_edges[i].data.backward;
found = true;
break;
}
if (!found)
{
inserted_edges[inserted_edges_size++] = inserted_edges[i];
}
}
inserted_edges.resize(inserted_edges_size);
}
return true;
}
inline void DeleteIncomingEdges(ContractorThreadData *data, const NodeID node)
{
std::vector<NodeID> &neighbours = data->neighbours;
neighbours.clear();
// find all neighbours
for (auto e : contractor_graph->GetAdjacentEdgeRange(node))
{
const NodeID u = contractor_graph->GetTarget(e);
if (u != node)
{
neighbours.push_back(u);
}
}
// eliminate duplicate entries ( forward + backward edges )
std::sort(neighbours.begin(), neighbours.end());
neighbours.resize(std::unique(neighbours.begin(), neighbours.end()) - neighbours.begin());
for (int i = 0, e = (int)neighbours.size(); i < e; ++i)
{
contractor_graph->DeleteEdgesTo(neighbours[i], node);
}
}
inline bool UpdateNodeNeighbours(std::vector<float> &priorities,
std::vector<NodePriorityData> &node_data,
ContractorThreadData *const data,
const NodeID node)
{
std::vector<NodeID> &neighbours = data->neighbours;
neighbours.clear();
// find all neighbours
for (auto e : contractor_graph->GetAdjacentEdgeRange(node))
{
const NodeID u = contractor_graph->GetTarget(e);
if (u == node)
{
continue;
}
neighbours.push_back(u);
node_data[u].depth = (std::max)(node_data[node].depth + 1, node_data[u].depth);
}
// eliminate duplicate entries ( forward + backward edges )
std::sort(neighbours.begin(), neighbours.end());
neighbours.resize(std::unique(neighbours.begin(), neighbours.end()) - neighbours.begin());
// re-evaluate priorities of neighboring nodes
for (const NodeID u : neighbours)
{
priorities[u] = EvaluateNodePriority(data, &(node_data)[u], u);
}
return true;
}
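// A node is only contracted in the current round if it is "independent": no
// neighbour within two hops may have a strictly lower priority, and ties are
// broken deterministically by the hash-based bias() further below. The nodes of
// such an independent set can be contracted in parallel without their witness
// searches interfering with one another.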
inline bool IsNodeIndependent(
const std::vector<float> &priorities /*, const std::vector< NodePriorityData >& node_data*/,
ContractorThreadData *const data,
NodeID node) const
{
const float priority = priorities[node];
std::vector<NodeID> &neighbours = data->neighbours;
neighbours.clear();
for (auto e : contractor_graph->GetAdjacentEdgeRange(node))
{
const NodeID target = contractor_graph->GetTarget(e);
if (node == target)
{
continue;
}
const float target_priority = priorities[target];
BOOST_ASSERT(target_priority >= 0);
// found a neighbour with lower priority?
if (priority > target_priority)
{
return false;
}
// tie breaking
if (std::abs(priority - target_priority) < std::numeric_limits<float>::epsilon() &&
bias(node, target))
{
return false;
}
neighbours.push_back(target);
}
std::sort(neighbours.begin(), neighbours.end());
neighbours.resize(std::unique(neighbours.begin(), neighbours.end()) - neighbours.begin());
// examine all neighbours that are at most 2 hops away
for (const NodeID u : neighbours)
{
for (auto e : contractor_graph->GetAdjacentEdgeRange(u))
{
const NodeID target = contractor_graph->GetTarget(e);
if (node == target)
{
continue;
}
const float target_priority = priorities[target];
assert(target_priority >= 0);
// found a neighbour with lower priority?
if (priority > target_priority)
{
return false;
}
// tie breaking
if (std::abs(priority - target_priority) < std::numeric_limits<float>::epsilon() &&
bias(node, target))
{
return false;
}
}
}
return true;
}
// This bias function takes up 22 assembly instructions in total on X86
inline bool bias(const NodeID a, const NodeID b) const
{
unsigned short hasha = fast_hash(a);
unsigned short hashb = fast_hash(b);
// The compiler optimizes that to conditional register flags but without branching
// statements!
if (hasha != hashb)
{
return hasha < hashb;
}
return a < b;
}
std::shared_ptr<ContractorGraph> contractor_graph;
std::vector<ContractorGraph::InputEdge> contracted_edge_list;
unsigned edge_storage_slot;
uint64_t temp_edge_counter;
std::vector<NodeID> orig_node_id_to_new_id_map;
XORFastHash fast_hash;
};
#endif // CONTRACTOR_H

View File

@ -0,0 +1,775 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "EdgeBasedGraphFactory.h"
#include "../Algorithms/BFSComponentExplorer.h"
#include "../DataStructures/Percent.h"
#include "../Util/ComputeAngle.h"
#include "../Util/LuaUtil.h"
#include "../Util/SimpleLogger.h"
#include "../Util/TimingUtil.h"
#include <boost/assert.hpp>
#include <fstream>
#include <limits>
EdgeBasedGraphFactory::EdgeBasedGraphFactory(
const std::shared_ptr<NodeBasedDynamicGraph> &node_based_graph,
std::unique_ptr<RestrictionMap> restriction_map,
std::vector<NodeID> &barrier_node_list,
std::vector<NodeID> &traffic_light_node_list,
std::vector<NodeInfo> &m_node_info_list,
SpeedProfileProperties &speed_profile)
: speed_profile(speed_profile),
m_number_of_edge_based_nodes(std::numeric_limits<unsigned>::max()),
m_node_info_list(m_node_info_list), m_node_based_graph(node_based_graph),
m_restriction_map(std::move(restriction_map)), max_id(0)
{
// insert into unordered sets for fast lookup
m_barrier_nodes.insert(barrier_node_list.begin(), barrier_node_list.end());
m_traffic_lights.insert(traffic_light_node_list.begin(), traffic_light_node_list.end());
}
void EdgeBasedGraphFactory::GetEdgeBasedEdges(DeallocatingVector<EdgeBasedEdge> &output_edge_list)
{
BOOST_ASSERT_MSG(0 == output_edge_list.size(), "Vector is not empty");
m_edge_based_edge_list.swap(output_edge_list);
}
void EdgeBasedGraphFactory::GetEdgeBasedNodes(std::vector<EdgeBasedNode> &nodes)
{
#ifndef NDEBUG
for (const EdgeBasedNode &node : m_edge_based_node_list)
{
BOOST_ASSERT(m_node_info_list.at(node.u).lat != INT_MAX);
BOOST_ASSERT(m_node_info_list.at(node.u).lon != INT_MAX);
BOOST_ASSERT(m_node_info_list.at(node.v).lon != INT_MAX);
BOOST_ASSERT(m_node_info_list.at(node.v).lat != INT_MAX);
}
#endif
nodes.swap(m_edge_based_node_list);
}
void
EdgeBasedGraphFactory::InsertEdgeBasedNode(NodeID u, NodeID v, EdgeID e1, bool belongs_to_tiny_cc)
{
// merge edges together into one EdgeBasedNode
BOOST_ASSERT(u != SPECIAL_NODEID);
BOOST_ASSERT(v != SPECIAL_NODEID);
BOOST_ASSERT(e1 != SPECIAL_EDGEID);
#ifndef NDEBUG
// find forward edge id and
const EdgeID e1b = m_node_based_graph->FindEdge(u, v);
BOOST_ASSERT(e1 == e1b);
#endif
BOOST_ASSERT(e1 != SPECIAL_EDGEID);
const EdgeData &forward_data = m_node_based_graph->GetEdgeData(e1);
// find reverse edge id and
const EdgeID e2 = m_node_based_graph->FindEdge(v, u);
#ifndef NDEBUG
if (e2 == m_node_based_graph->EndEdges(v))
{
SimpleLogger().Write(logWARNING) << "Did not find edge (" << v << "," << u << ")";
}
#endif
BOOST_ASSERT(e2 != SPECIAL_EDGEID);
BOOST_ASSERT(e2 < m_node_based_graph->EndEdges(v));
const EdgeData &reverse_data = m_node_based_graph->GetEdgeData(e2);
if (forward_data.edgeBasedNodeID == SPECIAL_NODEID &&
reverse_data.edgeBasedNodeID == SPECIAL_NODEID)
{
return;
}
BOOST_ASSERT(m_geometry_compressor.HasEntryForID(e1) ==
m_geometry_compressor.HasEntryForID(e2));
if (m_geometry_compressor.HasEntryForID(e1))
{
BOOST_ASSERT(m_geometry_compressor.HasEntryForID(e2));
// reconstruct geometry and put in each individual edge with its offset
const std::vector<GeometryCompressor::CompressedNode> &forward_geometry =
m_geometry_compressor.GetBucketReference(e1);
const std::vector<GeometryCompressor::CompressedNode> &reverse_geometry =
m_geometry_compressor.GetBucketReference(e2);
BOOST_ASSERT(forward_geometry.size() == reverse_geometry.size());
BOOST_ASSERT(0 != forward_geometry.size());
// reconstruct bidirectional edge with individual weights and put each into the NN index
std::vector<int> forward_dist_prefix_sum(forward_geometry.size(), 0);
std::vector<int> reverse_dist_prefix_sum(reverse_geometry.size(), 0);
// quick'n'dirty prefix sum as std::partial_sum needs additional casts
// TODO: move to lambda function with C++11
int temp_sum = 0;
for (unsigned i = 0; i < forward_geometry.size(); ++i)
{
forward_dist_prefix_sum[i] = temp_sum;
temp_sum += forward_geometry[i].second;
BOOST_ASSERT(forward_data.distance >= temp_sum);
}
temp_sum = 0;
for (unsigned i = 0; i < reverse_geometry.size(); ++i)
{
temp_sum += reverse_geometry[reverse_geometry.size() - 1 - i].second;
reverse_dist_prefix_sum[i] = reverse_data.distance - temp_sum;
BOOST_ASSERT(reverse_data.distance >= temp_sum);
}
BOOST_ASSERT(forward_geometry.size() == reverse_geometry.size());
const unsigned geometry_size = forward_geometry.size();
BOOST_ASSERT(geometry_size > 1);
NodeID current_edge_start_coordinate_id = u;
if (forward_data.edgeBasedNodeID != SPECIAL_NODEID)
{
max_id = std::max(forward_data.edgeBasedNodeID, max_id);
}
if (SPECIAL_NODEID != reverse_data.edgeBasedNodeID)
{
max_id = std::max(reverse_data.edgeBasedNodeID, max_id);
}
// traverse arrays from start and end respectively
for (unsigned i = 0; i < geometry_size; ++i)
{
BOOST_ASSERT(current_edge_start_coordinate_id ==
reverse_geometry[geometry_size - 1 - i].first);
const NodeID current_edge_target_coordinate_id = forward_geometry[i].first;
BOOST_ASSERT(current_edge_target_coordinate_id != current_edge_start_coordinate_id);
// build edges
m_edge_based_node_list.emplace_back(forward_data.edgeBasedNodeID,
reverse_data.edgeBasedNodeID,
current_edge_start_coordinate_id,
current_edge_target_coordinate_id,
forward_data.nameID,
forward_geometry[i].second,
reverse_geometry[i].second,
forward_dist_prefix_sum[i],
reverse_dist_prefix_sum[i],
m_geometry_compressor.GetPositionForID(e1),
i,
belongs_to_tiny_cc);
current_edge_start_coordinate_id = current_edge_target_coordinate_id;
BOOST_ASSERT(m_edge_based_node_list.back().IsCompressed());
BOOST_ASSERT(u != m_edge_based_node_list.back().u ||
v != m_edge_based_node_list.back().v);
BOOST_ASSERT(u != m_edge_based_node_list.back().v ||
v != m_edge_based_node_list.back().u);
}
BOOST_ASSERT(current_edge_start_coordinate_id == v);
BOOST_ASSERT(m_edge_based_node_list.back().IsCompressed());
}
else
{
BOOST_ASSERT(!m_geometry_compressor.HasEntryForID(e2));
if (forward_data.edgeBasedNodeID != SPECIAL_NODEID)
{
BOOST_ASSERT(forward_data.forward);
}
if (reverse_data.edgeBasedNodeID != SPECIAL_NODEID)
{
BOOST_ASSERT(reverse_data.forward);
}
if (forward_data.edgeBasedNodeID == SPECIAL_NODEID)
{
BOOST_ASSERT(!forward_data.forward);
}
if (reverse_data.edgeBasedNodeID == SPECIAL_NODEID)
{
BOOST_ASSERT(!reverse_data.forward);
}
BOOST_ASSERT(forward_data.edgeBasedNodeID != SPECIAL_NODEID ||
reverse_data.edgeBasedNodeID != SPECIAL_NODEID);
m_edge_based_node_list.emplace_back(EdgeBasedNode(forward_data.edgeBasedNodeID,
reverse_data.edgeBasedNodeID,
u,
v,
forward_data.nameID,
forward_data.distance,
reverse_data.distance,
0,
0,
SPECIAL_EDGEID,
0,
belongs_to_tiny_cc));
BOOST_ASSERT(!m_edge_based_node_list.back().IsCompressed());
}
}
void EdgeBasedGraphFactory::FlushVectorToStream(
std::ofstream &edge_data_file, std::vector<OriginalEdgeData> &original_edge_data_vector) const
{
edge_data_file.write((char *)&(original_edge_data_vector[0]),
original_edge_data_vector.size() * sizeof(OriginalEdgeData));
original_edge_data_vector.clear();
}
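// Run() drives the edge-expansion pipeline in four timed stages:
//   1. CompressGeometry()          - collapse degree-two chains of the node-based graph
//   2. RenumberEdges()             - assign edge-based node ids to the surviving forward edges
//   3. GenerateEdgeExpandedNodes() - turn every remaining edge (u,v) into an edge-based node
//   4. GenerateEdgeExpandedEdges() - turn every permitted turn (u,v,w) into an edge-based edge
// Afterwards the compressed geometry is serialized so that the full coordinate
// sequence of collapsed ways can be restored later.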
void EdgeBasedGraphFactory::Run(const std::string &original_edge_data_filename,
const std::string &geometry_filename,
lua_State *lua_state)
{
TIMER_START(geometry);
CompressGeometry();
TIMER_STOP(geometry);
TIMER_START(renumber);
RenumberEdges();
TIMER_STOP(renumber);
TIMER_START(generate_nodes);
GenerateEdgeExpandedNodes();
TIMER_STOP(generate_nodes);
TIMER_START(generate_edges);
GenerateEdgeExpandedEdges(original_edge_data_filename, lua_state);
TIMER_STOP(generate_edges);
m_geometry_compressor.SerializeInternalVector(geometry_filename);
SimpleLogger().Write() << "Timing statistics for edge-expanded graph:";
SimpleLogger().Write() << "Geometry compression: " << TIMER_MSEC(geometry)*0.001 << "s";
SimpleLogger().Write() << "Renumbering edges: " << TIMER_MSEC(renumber)*0.001 << "s";
SimpleLogger().Write() << "Generating nodes: " << TIMER_MSEC(generate_nodes)*0.001 << "s";
SimpleLogger().Write() << "Generating edges: " << TIMER_MSEC(generate_edges)*0.001 << "s";
}
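// CompressGeometry() removes topologically uninteresting vertices: whenever a
// vertex v has exactly two neighbours u and w, is neither a barrier node nor the
// via node of a turn restriction, u and w are not already directly connected,
// and the edge data on both sides is compatible, the chain
//     u <--> v <--> w
// is replaced by direct edges u <--> w whose weights are the sums of the removed
// halves (plus a traffic-signal penalty if v carries a signal). The shape of the
// removed chain is stored in m_geometry_compressor so it can be expanded again
// when building edge-based nodes and when serializing geometry.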
void EdgeBasedGraphFactory::CompressGeometry()
{
SimpleLogger().Write() << "Removing graph geometry while preserving topology";
const unsigned original_number_of_nodes = m_node_based_graph->GetNumberOfNodes();
const unsigned original_number_of_edges = m_node_based_graph->GetNumberOfEdges();
Percent p(original_number_of_nodes);
unsigned removed_node_count = 0;
for (NodeID v = 0; v < original_number_of_nodes; ++v)
{
p.printStatus(v);
// only contract degree 2 vertices
if (2 != m_node_based_graph->GetOutDegree(v))
{
continue;
}
// don't contract barrier node
if (m_barrier_nodes.end() != m_barrier_nodes.find(v))
{
continue;
}
// check if v is a via node for a turn restriction, i.e. a 'directed' barrier node
if (m_restriction_map->IsNodeAViaNode(v))
{
continue;
}
const bool reverse_edge_order =
!(m_node_based_graph->GetEdgeData(m_node_based_graph->BeginEdges(v)).forward);
const EdgeID forward_e2 = m_node_based_graph->BeginEdges(v) + reverse_edge_order;
BOOST_ASSERT(SPECIAL_EDGEID != forward_e2);
const EdgeID reverse_e2 = m_node_based_graph->BeginEdges(v) + 1 - reverse_edge_order;
BOOST_ASSERT(SPECIAL_EDGEID != reverse_e2);
const EdgeData &fwd_edge_data2 = m_node_based_graph->GetEdgeData(forward_e2);
const EdgeData &rev_edge_data2 = m_node_based_graph->GetEdgeData(reverse_e2);
const NodeID w = m_node_based_graph->GetTarget(forward_e2);
BOOST_ASSERT(SPECIAL_NODEID != w);
BOOST_ASSERT(v != w);
const NodeID u = m_node_based_graph->GetTarget(reverse_e2);
BOOST_ASSERT(SPECIAL_NODEID != u);
BOOST_ASSERT(u != v);
const EdgeID forward_e1 = m_node_based_graph->FindEdge(u, v);
BOOST_ASSERT(m_node_based_graph->EndEdges(u) != forward_e1);
BOOST_ASSERT(SPECIAL_EDGEID != forward_e1);
BOOST_ASSERT(v == m_node_based_graph->GetTarget(forward_e1));
const EdgeID reverse_e1 = m_node_based_graph->FindEdge(w, v);
BOOST_ASSERT(SPECIAL_EDGEID != reverse_e1);
BOOST_ASSERT(v == m_node_based_graph->GetTarget(reverse_e1));
const EdgeData &fwd_edge_data1 = m_node_based_graph->GetEdgeData(forward_e1);
const EdgeData &rev_edge_data1 = m_node_based_graph->GetEdgeData(reverse_e1);
if ((m_node_based_graph->FindEdge(u, w) != m_node_based_graph->EndEdges(u)) ||
(m_node_based_graph->FindEdge(w, u) != m_node_based_graph->EndEdges(w)))
{
continue;
}
if ( // TODO: rename to IsCompatibleTo
fwd_edge_data1.IsEqualTo(fwd_edge_data2) &&
rev_edge_data1.IsEqualTo(rev_edge_data2))
{
// Get distances before graph is modified
const int forward_weight1 = m_node_based_graph->GetEdgeData(forward_e1).distance;
const int forward_weight2 = m_node_based_graph->GetEdgeData(forward_e2).distance;
BOOST_ASSERT(0 != forward_weight1);
BOOST_ASSERT(0 != forward_weight2);
const int reverse_weight1 = m_node_based_graph->GetEdgeData(reverse_e1).distance;
const int reverse_weight2 = m_node_based_graph->GetEdgeData(reverse_e2).distance;
BOOST_ASSERT(0 != reverse_weight1);
BOOST_ASSERT(0 != forward_weight2);
const bool add_traffic_signal_penalty =
(m_traffic_lights.find(v) != m_traffic_lights.end());
// add weight of e2's to e1
m_node_based_graph->GetEdgeData(forward_e1).distance += fwd_edge_data2.distance;
m_node_based_graph->GetEdgeData(reverse_e1).distance += rev_edge_data2.distance;
if (add_traffic_signal_penalty)
{
m_node_based_graph->GetEdgeData(forward_e1).distance +=
speed_profile.trafficSignalPenalty;
m_node_based_graph->GetEdgeData(reverse_e1).distance +=
speed_profile.trafficSignalPenalty;
}
// extend e1's to targets of e2's
m_node_based_graph->SetTarget(forward_e1, w);
m_node_based_graph->SetTarget(reverse_e1, u);
// remove e2's (if bidir, otherwise only one)
m_node_based_graph->DeleteEdge(v, forward_e2);
m_node_based_graph->DeleteEdge(v, reverse_e2);
// update any involved turn restrictions
m_restriction_map->FixupStartingTurnRestriction(u, v, w);
m_restriction_map->FixupArrivingTurnRestriction(u, v, w);
m_restriction_map->FixupStartingTurnRestriction(w, v, u);
m_restriction_map->FixupArrivingTurnRestriction(w, v, u);
// store compressed geometry in container
m_geometry_compressor.CompressEdge(
forward_e1,
forward_e2,
v,
w,
forward_weight1 +
(add_traffic_signal_penalty ? speed_profile.trafficSignalPenalty : 0),
forward_weight2);
m_geometry_compressor.CompressEdge(
reverse_e1,
reverse_e2,
v,
u,
reverse_weight1,
reverse_weight2 +
(add_traffic_signal_penalty ? speed_profile.trafficSignalPenalty : 0));
++removed_node_count;
BOOST_ASSERT(m_node_based_graph->GetEdgeData(forward_e1).nameID ==
m_node_based_graph->GetEdgeData(reverse_e1).nameID);
}
}
SimpleLogger().Write() << "removed " << removed_node_count << " nodes";
m_geometry_compressor.PrintStatistics();
unsigned new_node_count = 0;
unsigned new_edge_count = 0;
for (unsigned i = 0; i < m_node_based_graph->GetNumberOfNodes(); ++i)
{
if (m_node_based_graph->GetOutDegree(i) > 0)
{
++new_node_count;
new_edge_count += (m_node_based_graph->EndEdges(i) - m_node_based_graph->BeginEdges(i));
}
}
SimpleLogger().Write() << "new nodes: " << new_node_count << ", edges " << new_edge_count;
SimpleLogger().Write() << "Node compression ratio: " << new_node_count /
(double)original_number_of_nodes;
SimpleLogger().Write() << "Edge compression ratio: " << new_edge_count /
(double)original_number_of_edges;
}
/**
* Writes the id of the edge in the edge-expanded graph (into the edge in the node-based graph)
*/
void EdgeBasedGraphFactory::RenumberEdges()
{
// renumber edge based node IDs
unsigned numbered_edges_count = 0;
for (NodeID current_node = 0; current_node < m_node_based_graph->GetNumberOfNodes();
++current_node)
{
for (EdgeID current_edge : m_node_based_graph->GetAdjacentEdgeRange(current_node))
{
EdgeData &edge_data = m_node_based_graph->GetEdgeData(current_edge);
if (!edge_data.forward)
{
continue;
}
BOOST_ASSERT(numbered_edges_count < m_node_based_graph->GetNumberOfEdges());
edge_data.edgeBasedNodeID = numbered_edges_count;
++numbered_edges_count;
BOOST_ASSERT(SPECIAL_NODEID != edge_data.edgeBasedNodeID);
}
}
m_number_of_edge_based_nodes = numbered_edges_count;
}
/**
* Creates the nodes in the edge expanded graph from edges in the node-based graph.
*/
void EdgeBasedGraphFactory::GenerateEdgeExpandedNodes()
{
SimpleLogger().Write() << "Identifying components of the road network";
// Run a BFS on the undirected graph and identify small components
BFSComponentExplorer<NodeBasedDynamicGraph> component_explorer(
*m_node_based_graph, *m_restriction_map, m_barrier_nodes);
component_explorer.run();
SimpleLogger().Write() << "identified: " << component_explorer.GetNumberOfComponents()
<< " many components";
SimpleLogger().Write() << "generating edge-expanded nodes";
Percent p(m_node_based_graph->GetNumberOfNodes());
// loop over all edges and generate new set of nodes
for (NodeID u = 0, end = m_node_based_graph->GetNumberOfNodes(); u < end; ++u)
{
BOOST_ASSERT(u != SPECIAL_NODEID);
BOOST_ASSERT(u < m_node_based_graph->GetNumberOfNodes());
p.printIncrement();
for (EdgeID e1 : m_node_based_graph->GetAdjacentEdgeRange(u))
{
const EdgeData &edge_data = m_node_based_graph->GetEdgeData(e1);
if (edge_data.edgeBasedNodeID == SPECIAL_NODEID)
{
// continue;
}
BOOST_ASSERT(e1 != SPECIAL_EDGEID);
const NodeID v = m_node_based_graph->GetTarget(e1);
BOOST_ASSERT(SPECIAL_NODEID != v);
// pick only every other edge
if (u > v)
{
continue;
}
BOOST_ASSERT(u < v);
BOOST_ASSERT(edge_data.type != SHRT_MAX);
// Note: edges that end on barrier nodes or on a turn restriction
// may actually be in two distinct components. We choose the smallest
const unsigned size_of_component = std::min(component_explorer.GetComponentSize(u),
component_explorer.GetComponentSize(v));
const bool component_is_tiny = (size_of_component < 1000);
InsertEdgeBasedNode(u, v, e1, component_is_tiny);
}
}
SimpleLogger().Write() << "Generated " << m_edge_based_node_list.size()
<< " nodes in edge-expanded graph";
}
/**
* Actually it also generates OriginalEdgeData and serializes them...
*/
void
EdgeBasedGraphFactory::GenerateEdgeExpandedEdges(const std::string &original_edge_data_filename,
lua_State *lua_state)
{
SimpleLogger().Write() << "generating edge-expanded edges";
unsigned node_based_edge_counter = 0;
unsigned original_edges_counter = 0;
std::ofstream edge_data_file(original_edge_data_filename.c_str(), std::ios::binary);
// writes a dummy value that is updated later
edge_data_file.write((char *)&original_edges_counter, sizeof(unsigned));
std::vector<OriginalEdgeData> original_edge_data_vector;
original_edge_data_vector.reserve(1024 * 1024);
// Loop over all turns and generate new set of edges.
    // Three nested loops look super-linear, but we are dealing with a (kind of)
// linear number of turns only.
unsigned restricted_turns_counter = 0;
unsigned skipped_uturns_counter = 0;
unsigned skipped_barrier_turns_counter = 0;
unsigned compressed = 0;
Percent p(m_node_based_graph->GetNumberOfNodes());
for (NodeID u = 0, end = m_node_based_graph->GetNumberOfNodes(); u < end; ++u)
{
for (EdgeID e1 : m_node_based_graph->GetAdjacentEdgeRange(u))
{
if (!m_node_based_graph->GetEdgeData(e1).forward)
{
continue;
}
++node_based_edge_counter;
const NodeID v = m_node_based_graph->GetTarget(e1);
const NodeID to_node_of_only_restriction =
m_restriction_map->CheckForEmanatingIsOnlyTurn(u, v);
const bool is_barrier_node = (m_barrier_nodes.find(v) != m_barrier_nodes.end());
for (EdgeID e2 : m_node_based_graph->GetAdjacentEdgeRange(v))
{
if (!m_node_based_graph->GetEdgeData(e2).forward)
{
continue;
}
const NodeID w = m_node_based_graph->GetTarget(e2);
if ((to_node_of_only_restriction != SPECIAL_NODEID) &&
(w != to_node_of_only_restriction))
{
                    // We are at an 'only_*' restriction, but this is not the mandated turn.
++restricted_turns_counter;
continue;
}
if (is_barrier_node)
{
if (u != w)
{
++skipped_barrier_turns_counter;
continue;
}
}
else
{
if ((u == w) && (m_node_based_graph->GetOutDegree(v) > 1))
{
++skipped_uturns_counter;
continue;
}
}
// only add an edge if turn is not a U-turn except when it is
// at the end of a dead-end street
if (m_restriction_map->CheckIfTurnIsRestricted(u, v, w) &&
(to_node_of_only_restriction == SPECIAL_NODEID) &&
(w != to_node_of_only_restriction))
{
++restricted_turns_counter;
continue;
}
// only add an edge if turn is not prohibited
const EdgeData &edge_data1 = m_node_based_graph->GetEdgeData(e1);
const EdgeData &edge_data2 = m_node_based_graph->GetEdgeData(e2);
BOOST_ASSERT(edge_data1.edgeBasedNodeID != edge_data2.edgeBasedNodeID);
BOOST_ASSERT(edge_data1.forward);
BOOST_ASSERT(edge_data2.forward);
// the following is the core of the loop.
unsigned distance = edge_data1.distance;
if (m_traffic_lights.find(v) != m_traffic_lights.end())
{
distance += speed_profile.trafficSignalPenalty;
}
const double angle = GetAngleBetweenThreeFixedPointCoordinates(
m_node_info_list[u], m_node_info_list[v], m_node_info_list[w]);
const int turn_penalty = GetTurnPenalty(angle, lua_state);
TurnInstruction turn_instruction = AnalyzeTurn(u, v, w, angle);
if (turn_instruction == TurnInstruction::UTurn)
{
distance += speed_profile.uTurnPenalty;
}
distance += turn_penalty;
const bool edge_is_compressed = m_geometry_compressor.HasEntryForID(e1);
if (edge_is_compressed)
{
++compressed;
}
original_edge_data_vector.emplace_back(
(edge_is_compressed ? m_geometry_compressor.GetPositionForID(e1) : v),
edge_data1.nameID,
turn_instruction,
edge_is_compressed);
++original_edges_counter;
if (original_edge_data_vector.size() > 1024 * 1024 * 10)
{
FlushVectorToStream(edge_data_file, original_edge_data_vector);
}
BOOST_ASSERT(SPECIAL_NODEID != edge_data1.edgeBasedNodeID);
BOOST_ASSERT(SPECIAL_NODEID != edge_data2.edgeBasedNodeID);
m_edge_based_edge_list.emplace_back(EdgeBasedEdge(edge_data1.edgeBasedNodeID,
edge_data2.edgeBasedNodeID,
m_edge_based_edge_list.size(),
distance,
true,
false));
}
}
p.printIncrement();
}
FlushVectorToStream(edge_data_file, original_edge_data_vector);
    edge_data_file.seekp(0, std::ios::beg); // rewind to patch the edge count written first
edge_data_file.write((char *)&original_edges_counter, sizeof(unsigned));
edge_data_file.close();
SimpleLogger().Write() << "Generated " << m_edge_based_node_list.size() << " edge based nodes";
SimpleLogger().Write() << "Node-based graph contains " << node_based_edge_counter << " edges";
SimpleLogger().Write() << "Edge-expanded graph ...";
SimpleLogger().Write() << " contains " << m_edge_based_edge_list.size() << " edges";
SimpleLogger().Write() << " skips " << restricted_turns_counter << " turns, "
"defined by "
<< m_restriction_map->size() << " restrictions";
SimpleLogger().Write() << " skips " << skipped_uturns_counter << " U turns";
SimpleLogger().Write() << " skips " << skipped_barrier_turns_counter << " turns over barriers";
}
int EdgeBasedGraphFactory::GetTurnPenalty(double angle, lua_State *lua_state) const
{
if (speed_profile.has_turn_penalty_function)
{
try
{
// call lua profile to compute turn penalty
return luabind::call_function<int>(lua_state, "turn_function", 180. - angle);
}
catch (const luabind::error &er) { SimpleLogger().Write(logWARNING) << er.what(); }
}
return 0;
}
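// Purely illustrative sketch: in OSRM the penalty above comes from the profile's
// Lua "turn_function", which receives the deviation from going straight
// (180 - angle, in degrees). The constants and the quadratic shape below are
// made up for illustration only and are not OSRM defaults.
inline int ExampleTurnPenalty(double angle_deviation)
{
    if (angle_deviation < 0.)
    {
        angle_deviation = -angle_deviation;
    }
    if (angle_deviation > 180.)
    {
        angle_deviation = 180.;
    }
    const double normalized = angle_deviation / 180.; // 0 = straight on, 1 = full U-turn
    const double max_penalty = 120.;                  // hypothetical cost units
    // quadratic growth: sharp turns are penalized disproportionately
    return static_cast<int>(max_penalty * normalized * normalized);
}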
TurnInstruction EdgeBasedGraphFactory::AnalyzeTurn(const NodeID u,
                                                   const NodeID v,
                                                   const NodeID w,
                                                   double angle) const
{
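    // Decision order of this function, for reference:
    //   1. u == w                               -> UTurn
    //   2. contraflow changes between the edges -> Enter-/LeaveAgainstAllowedDirection
    //   3. both edges lie on a roundabout       -> NoTurn or StayOnRoundAbout
    //   4. exactly one edge on a roundabout     -> Enter-/LeaveRoundAbout
    //   5. same (non-empty) name or low degree  -> NoTurn
    //   6. otherwise                            -> direction derived from the turn angle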
if (u == w)
{
return TurnInstruction::UTurn;
}
const EdgeID edge1 = m_node_based_graph->FindEdge(u, v);
const EdgeID edge2 = m_node_based_graph->FindEdge(v, w);
const EdgeData &data1 = m_node_based_graph->GetEdgeData(edge1);
const EdgeData &data2 = m_node_based_graph->GetEdgeData(edge2);
if (!data1.contraFlow && data2.contraFlow)
{
return TurnInstruction::EnterAgainstAllowedDirection;
}
if (data1.contraFlow && !data2.contraFlow)
{
return TurnInstruction::LeaveAgainstAllowedDirection;
}
    // roundabouts need to be handled explicitly
if (data1.roundabout && data2.roundabout)
{
// Is a turn possible? If yes, we stay on the roundabout!
if (1 == m_node_based_graph->GetDirectedOutDegree(v))
{
// No turn possible.
return TurnInstruction::NoTurn;
}
return TurnInstruction::StayOnRoundAbout;
}
// Does turn start or end on roundabout?
if (data1.roundabout || data2.roundabout)
{
// We are entering the roundabout
if ((!data1.roundabout) && data2.roundabout)
{
return TurnInstruction::EnterRoundAbout;
}
// We are leaving the roundabout
if (data1.roundabout && (!data2.roundabout))
{
return TurnInstruction::LeaveRoundAbout;
}
}
    // If street names stay the same and if we are certain that it is not a
    // segment of a roundabout, we skip the turn instruction.
if (data1.nameID == data2.nameID)
{
// TODO: Here we should also do a small graph exploration to check for
// more complex situations
if (0 != data1.nameID)
{
return TurnInstruction::NoTurn;
}
else if (m_node_based_graph->GetOutDegree(v) <= 2)
{
return TurnInstruction::NoTurn;
}
}
return TurnInstructionsClass::GetTurnDirectionOfInstruction(angle);
}
unsigned EdgeBasedGraphFactory::GetNumberOfEdgeBasedNodes() const
{
return m_number_of_edge_based_nodes;
}


@ -0,0 +1,128 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// This class constructs the edge-expanded routing graph
#ifndef EDGEBASEDGRAPHFACTORY_H_
#define EDGEBASEDGRAPHFACTORY_H_
#include "../typedefs.h"
#include "../DataStructures/DeallocatingVector.h"
#include "../DataStructures/DynamicGraph.h"
#include "../DataStructures/EdgeBasedNode.h"
#include "../DataStructures/OriginalEdgeData.h"
#include "../DataStructures/QueryNode.h"
#include "../DataStructures/TurnInstructions.h"
#include "../DataStructures/Restriction.h"
#include "../DataStructures/NodeBasedGraph.h"
#include "../DataStructures/RestrictionMap.h"
#include "GeometryCompressor.h"
#include <algorithm>
#include <iosfwd>
#include <memory>
#include <queue>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
struct lua_State;
class EdgeBasedGraphFactory
{
public:
EdgeBasedGraphFactory() = delete;
EdgeBasedGraphFactory(const EdgeBasedGraphFactory &) = delete;
struct SpeedProfileProperties;
explicit EdgeBasedGraphFactory(const std::shared_ptr<NodeBasedDynamicGraph> &node_based_graph,
std::unique_ptr<RestrictionMap> restricion_map,
std::vector<NodeID> &barrier_node_list,
std::vector<NodeID> &traffic_light_node_list,
std::vector<NodeInfo> &m_node_info_list,
SpeedProfileProperties &speed_profile);
void Run(const std::string &original_edge_data_filename,
const std::string &geometry_filename,
lua_State *lua_state);
void GetEdgeBasedEdges(DeallocatingVector<EdgeBasedEdge> &edges);
void GetEdgeBasedNodes(std::vector<EdgeBasedNode> &nodes);
TurnInstruction AnalyzeTurn(const NodeID u, const NodeID v, const NodeID w, double angle) const;
int GetTurnPenalty(double angle, lua_State *lua_state) const;
unsigned GetNumberOfEdgeBasedNodes() const;
struct SpeedProfileProperties
{
SpeedProfileProperties()
: trafficSignalPenalty(0), uTurnPenalty(0), has_turn_penalty_function(false)
{
}
int trafficSignalPenalty;
int uTurnPenalty;
bool has_turn_penalty_function;
} speed_profile;
private:
typedef NodeBasedDynamicGraph::EdgeData EdgeData;
unsigned m_number_of_edge_based_nodes;
std::vector<NodeInfo> m_node_info_list;
std::vector<EdgeBasedNode> m_edge_based_node_list;
DeallocatingVector<EdgeBasedEdge> m_edge_based_edge_list;
std::shared_ptr<NodeBasedDynamicGraph> m_node_based_graph;
std::unordered_set<NodeID> m_barrier_nodes;
std::unordered_set<NodeID> m_traffic_lights;
std::unique_ptr<RestrictionMap> m_restriction_map;
GeometryCompressor m_geometry_compressor;
void CompressGeometry();
void RenumberEdges();
void GenerateEdgeExpandedNodes();
void GenerateEdgeExpandedEdges(const std::string &original_edge_data_filename,
lua_State *lua_state);
void InsertEdgeBasedNode(NodeID u, NodeID v, EdgeID e1, bool belongsToTinyComponent);
void FlushVectorToStream(std::ofstream &edge_data_file,
std::vector<OriginalEdgeData> &original_edge_data_vector) const;
unsigned max_id;
};
#endif /* EDGEBASEDGRAPHFACTORY_H_ */
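// Rough usage sketch, not taken from the OSRM sources: how calling code might
// drive the factory declared above. The function name, the output file names
// passed to Run() and the way the inputs are obtained are assumptions made for
// illustration only.
#include <utility> // std::move, assumed available

inline void BuildEdgeExpandedGraphExample(
    const std::shared_ptr<NodeBasedDynamicGraph> &node_based_graph,
    std::unique_ptr<RestrictionMap> restriction_map,
    std::vector<NodeID> &barrier_nodes,
    std::vector<NodeID> &traffic_light_nodes,
    std::vector<NodeInfo> &node_info_list,
    EdgeBasedGraphFactory::SpeedProfileProperties &speed_profile,
    lua_State *lua_state)
{
    EdgeBasedGraphFactory factory(node_based_graph,
                                  std::move(restriction_map),
                                  barrier_nodes,
                                  traffic_light_nodes,
                                  node_info_list,
                                  speed_profile);

    // hypothetical output file names
    factory.Run("example.originals", "example.geometry", lua_state);

    DeallocatingVector<EdgeBasedEdge> edge_based_edges;
    std::vector<EdgeBasedNode> edge_based_nodes;
    factory.GetEdgeBasedEdges(edge_based_edges);
    factory.GetEdgeBasedNodes(edge_based_nodes);
    (void)factory.GetNumberOfEdgeBasedNodes();
}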


@ -0,0 +1,225 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "GeometryCompressor.h"
#include "../Util/SimpleLogger.h"
#include <boost/assert.hpp>
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
#include <limits>
#include <string>
int current_free_list_maximum = 0;
int UniqueNumber() { return ++current_free_list_maximum; }
GeometryCompressor::GeometryCompressor()
{
m_free_list.reserve(100);
IncreaseFreeList();
}
void GeometryCompressor::IncreaseFreeList()
{
m_compressed_geometries.resize(m_compressed_geometries.size() + 100);
for (unsigned i = 100; i > 0; --i)
{
m_free_list.emplace_back(current_free_list_maximum);
++current_free_list_maximum;
}
}
bool GeometryCompressor::HasEntryForID(const EdgeID edge_id) const
{
return (m_edge_id_to_list_index_map.find(edge_id) != m_edge_id_to_list_index_map.end());
}
unsigned GeometryCompressor::GetPositionForID(const EdgeID edge_id) const
{
auto map_iterator = m_edge_id_to_list_index_map.find(edge_id);
BOOST_ASSERT(map_iterator != m_edge_id_to_list_index_map.end());
BOOST_ASSERT(map_iterator->second < m_compressed_geometries.size());
return map_iterator->second;
}
void GeometryCompressor::SerializeInternalVector(const std::string &path) const
{
boost::filesystem::fstream geometry_out_stream(path, std::ios::binary | std::ios::out);
const unsigned number_of_compressed_geometries = m_compressed_geometries.size() + 1;
BOOST_ASSERT(std::numeric_limits<unsigned>::max() != number_of_compressed_geometries);
geometry_out_stream.write((char *)&number_of_compressed_geometries, sizeof(unsigned));
// write indices array
unsigned prefix_sum_of_list_indices = 0;
for (const auto &elem : m_compressed_geometries)
{
geometry_out_stream.write((char *)&prefix_sum_of_list_indices, sizeof(unsigned));
const std::vector<CompressedNode> &current_vector = elem;
const unsigned unpacked_size = current_vector.size();
BOOST_ASSERT(std::numeric_limits<unsigned>::max() != unpacked_size);
prefix_sum_of_list_indices += unpacked_size;
}
// sentinel element
geometry_out_stream.write((char *)&prefix_sum_of_list_indices, sizeof(unsigned));
    // number of geometry entries to follow; it equals the (inclusive) prefix sum
geometry_out_stream.write((char *)&prefix_sum_of_list_indices, sizeof(unsigned));
unsigned control_sum = 0;
// write compressed geometries
for (auto &elem : m_compressed_geometries)
{
const std::vector<CompressedNode> &current_vector = elem;
const unsigned unpacked_size = current_vector.size();
control_sum += unpacked_size;
BOOST_ASSERT(std::numeric_limits<unsigned>::max() != unpacked_size);
for (const CompressedNode current_node : current_vector)
{
geometry_out_stream.write((char *)&(current_node.first), sizeof(NodeID));
}
}
BOOST_ASSERT(control_sum == prefix_sum_of_list_indices);
// all done, let's close the resource
geometry_out_stream.close();
}
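// Sketch of a matching reader for the layout written above: an index array
// (including the sentinel) followed by the flat list of node IDs; edge weights
// are not serialized. This illustrates the file format only and is not the
// loader OSRM actually uses; the function and parameter names are made up.
inline void ReadCompressedGeometryExample(const std::string &path,
                                          std::vector<unsigned> &geometry_indices,
                                          std::vector<NodeID> &geometry_node_list)
{
    boost::filesystem::fstream geometry_in_stream(path, std::ios::binary | std::ios::in);

    unsigned number_of_indices = 0; // m_compressed_geometries.size() + 1, i.e. with sentinel
    geometry_in_stream.read((char *)&number_of_indices, sizeof(unsigned));
    geometry_indices.resize(number_of_indices);
    geometry_in_stream.read((char *)geometry_indices.data(), number_of_indices * sizeof(unsigned));

    unsigned number_of_nodes = 0; // the (inclusive) prefix sum written twice above
    geometry_in_stream.read((char *)&number_of_nodes, sizeof(unsigned));
    geometry_node_list.resize(number_of_nodes);
    geometry_in_stream.read((char *)geometry_node_list.data(), number_of_nodes * sizeof(NodeID));

    // the nodes of compressed geometry i are
    // geometry_node_list[geometry_indices[i]] .. geometry_node_list[geometry_indices[i + 1] - 1]
}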
void GeometryCompressor::CompressEdge(const EdgeID edge_id_1,
const EdgeID edge_id_2,
const NodeID via_node_id,
const NodeID target_node_id,
const EdgeWeight weight1,
const EdgeWeight weight2)
{
// remove super-trivial geometries
BOOST_ASSERT(SPECIAL_EDGEID != edge_id_1);
BOOST_ASSERT(SPECIAL_EDGEID != edge_id_2);
BOOST_ASSERT(SPECIAL_NODEID != via_node_id);
BOOST_ASSERT(SPECIAL_NODEID != target_node_id);
BOOST_ASSERT(INVALID_EDGE_WEIGHT != weight1);
BOOST_ASSERT(INVALID_EDGE_WEIGHT != weight2);
// append list of removed edge_id plus via node to surviving edge id:
    // <surv_1, .. , surv_n, via_node_id, rem_1, .. , rem_n>
//
// General scheme:
// 1. append via node id to list of edge_id_1
    // 2. find the list for edge_id_2; if it exists, append all its elements and delete it
// Add via node id. List is created if it does not exist
if (!HasEntryForID(edge_id_1))
{
// create a new entry in the map
if (0 == m_free_list.size())
{
// make sure there is a place to put the entries
IncreaseFreeList();
}
BOOST_ASSERT(!m_free_list.empty());
m_edge_id_to_list_index_map[edge_id_1] = m_free_list.back();
m_free_list.pop_back();
}
const auto iter = m_edge_id_to_list_index_map.find(edge_id_1);
BOOST_ASSERT(iter != m_edge_id_to_list_index_map.end());
const unsigned edge_bucket_id1 = iter->second;
BOOST_ASSERT(edge_bucket_id1 == GetPositionForID(edge_id_1));
BOOST_ASSERT(edge_bucket_id1 < m_compressed_geometries.size());
std::vector<CompressedNode> &edge_bucket_list1 = m_compressed_geometries[edge_bucket_id1];
if (edge_bucket_list1.empty())
{
edge_bucket_list1.emplace_back(via_node_id, weight1);
}
BOOST_ASSERT(0 < edge_bucket_list1.size());
BOOST_ASSERT(!edge_bucket_list1.empty());
if (HasEntryForID(edge_id_2))
{
// second edge is not atomic anymore
const unsigned list_to_remove_index = GetPositionForID(edge_id_2);
BOOST_ASSERT(list_to_remove_index < m_compressed_geometries.size());
std::vector<CompressedNode> &edge_bucket_list2 =
m_compressed_geometries[list_to_remove_index];
// found an existing list, append it to the list of edge_id_1
edge_bucket_list1.insert(
edge_bucket_list1.end(), edge_bucket_list2.begin(), edge_bucket_list2.end());
// remove the list of edge_id_2
m_edge_id_to_list_index_map.erase(edge_id_2);
BOOST_ASSERT(m_edge_id_to_list_index_map.end() ==
m_edge_id_to_list_index_map.find(edge_id_2));
edge_bucket_list2.clear();
BOOST_ASSERT(0 == edge_bucket_list2.size());
m_free_list.emplace_back(list_to_remove_index);
BOOST_ASSERT(list_to_remove_index == m_free_list.back());
}
else
{
// we are certain that the second edge is atomic.
edge_bucket_list1.emplace_back(target_node_id, weight2);
}
}
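// Worked example with made-up IDs: repeatedly compressing the chain
// a --e1--> b --e2--> c --e3--> d onto the surviving edge e1. Note that the
// free list is fed from a file-global counter, so this sketch assumes it runs
// against the first GeometryCompressor instance created in the process.
inline void GeometryCompressorExample()
{
    GeometryCompressor compressor;
    const EdgeID e1 = 1, e2 = 2, e3 = 3;
    const NodeID b = 11, c = 12, d = 13;

    compressor.CompressEdge(e1, e2, /*via*/ b, /*target*/ c, 10, 20);
    // e1's bucket now holds {(b,10), (c,20)}; e2 has no entry of its own
    BOOST_ASSERT(compressor.HasEntryForID(e1));
    BOOST_ASSERT(!compressor.HasEntryForID(e2));
    BOOST_ASSERT(2 == compressor.GetBucketReference(e1).size());

    compressor.CompressEdge(e1, e3, /*via*/ c, /*target*/ d, 30, 40);
    // the via node c is already the tail of e1's bucket, so only (d,40) is appended
    BOOST_ASSERT(3 == compressor.GetBucketReference(e1).size());
}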
void GeometryCompressor::PrintStatistics() const
{
const uint64_t compressed_edges = m_compressed_geometries.size();
BOOST_ASSERT(0 == compressed_edges % 2);
BOOST_ASSERT(m_compressed_geometries.size() + m_free_list.size() > 0);
uint64_t number_of_compressed_geometries = 0;
uint64_t longest_chain_length = 0;
for (const std::vector<CompressedNode> &current_vector : m_compressed_geometries)
{
number_of_compressed_geometries += current_vector.size();
longest_chain_length = std::max(longest_chain_length, (uint64_t)current_vector.size());
}
SimpleLogger().Write() << "Geometry successfully removed:"
"\n compressed edges: " << compressed_edges
<< "\n compressed geometries: " << number_of_compressed_geometries
<< "\n longest chain length: " << longest_chain_length
<< "\n cmpr ratio: "
<< ((float)compressed_edges /
std::max(number_of_compressed_geometries, (uint64_t)1))
<< "\n avg chain length: "
<< (float)number_of_compressed_geometries /
std::max((uint64_t)1, compressed_edges);
}
const std::vector<GeometryCompressor::CompressedNode> &
GeometryCompressor::GetBucketReference(const EdgeID edge_id) const
{
const unsigned index = m_edge_id_to_list_index_map.at(edge_id);
return m_compressed_geometries.at(index);
}


@ -0,0 +1,65 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "../typedefs.h"
#include <unordered_map>
#include <string>
#include <vector>
#ifndef GEOMETRY_COMPRESSOR_H
#define GEOMETRY_COMPRESSOR_H
class GeometryCompressor
{
public:
typedef std::pair<NodeID, EdgeWeight> CompressedNode;
GeometryCompressor();
void CompressEdge(const EdgeID surviving_edge_id,
const EdgeID removed_edge_id,
const NodeID via_node_id,
const NodeID target_node,
const EdgeWeight weight1,
const EdgeWeight weight2);
bool HasEntryForID(const EdgeID edge_id) const;
void PrintStatistics() const;
void SerializeInternalVector(const std::string &path) const;
unsigned GetPositionForID(const EdgeID edge_id) const;
const std::vector<GeometryCompressor::CompressedNode> &
GetBucketReference(const EdgeID edge_id) const;
private:
void IncreaseFreeList();
std::vector<std::vector<CompressedNode>> m_compressed_geometries;
std::vector<unsigned> m_free_list;
std::unordered_map<EdgeID, unsigned> m_edge_id_to_list_index_map;
};
#endif // GEOMETRY_COMPRESSOR_H


@ -0,0 +1,172 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "TemporaryStorage.h"
StreamData::StreamData()
: write_mode(true), temp_path(boost::filesystem::unique_path(temp_directory.append(
TemporaryFilePattern.begin(), TemporaryFilePattern.end()))),
temp_file(new boost::filesystem::fstream(
temp_path, std::ios::in | std::ios::out | std::ios::trunc | std::ios::binary)),
readWriteMutex(std::make_shared<boost::mutex>())
{
if (temp_file->fail())
{
throw OSRMException("temporary file could not be created");
}
}
TemporaryStorage::TemporaryStorage() { temp_directory = boost::filesystem::temp_directory_path(); }
TemporaryStorage &TemporaryStorage::GetInstance()
{
static TemporaryStorage static_instance;
return static_instance;
}
TemporaryStorage::~TemporaryStorage() { RemoveAll(); }
void TemporaryStorage::RemoveAll()
{
boost::mutex::scoped_lock lock(mutex);
for (unsigned slot_id = 0; slot_id < stream_data_list.size(); ++slot_id)
{
DeallocateSlot(slot_id);
}
stream_data_list.clear();
}
int TemporaryStorage::AllocateSlot()
{
boost::mutex::scoped_lock lock(mutex);
try { stream_data_list.push_back(StreamData()); }
catch (boost::filesystem::filesystem_error &e) { Abort(e); }
CheckIfTemporaryDeviceFull();
return stream_data_list.size() - 1;
}
void TemporaryStorage::DeallocateSlot(const int slot_id)
{
try
{
StreamData &data = stream_data_list[slot_id];
boost::mutex::scoped_lock lock(*data.readWriteMutex);
if (!boost::filesystem::exists(data.temp_path))
{
return;
}
if (data.temp_file->is_open())
{
data.temp_file->close();
}
boost::filesystem::remove(data.temp_path);
}
catch (boost::filesystem::filesystem_error &e) { Abort(e); }
}
void TemporaryStorage::WriteToSlot(const int slot_id, char *pointer, const std::size_t size)
{
try
{
StreamData &data = stream_data_list[slot_id];
BOOST_ASSERT(data.write_mode);
boost::mutex::scoped_lock lock(*data.readWriteMutex);
BOOST_ASSERT_MSG(data.write_mode, "Writing after first read is not allowed");
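        // flush the in-memory buffer to the temporary file once it exceeds 1 GiB (2^30 bytes)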
if (1073741824 < data.buffer.size())
{
data.temp_file->write(&data.buffer[0], data.buffer.size());
// data.temp_file->write(pointer, size);
data.buffer.clear();
CheckIfTemporaryDeviceFull();
}
data.buffer.insert(data.buffer.end(), pointer, pointer + size);
}
catch (boost::filesystem::filesystem_error &e) { Abort(e); }
}
void TemporaryStorage::ReadFromSlot(const int slot_id, char *pointer, const std::size_t size)
{
try
{
StreamData &data = stream_data_list[slot_id];
boost::mutex::scoped_lock lock(*data.readWriteMutex);
if (data.write_mode)
{
data.write_mode = false;
data.temp_file->write(&data.buffer[0], data.buffer.size());
data.buffer.clear();
data.temp_file->seekg(data.temp_file->beg);
BOOST_ASSERT(data.temp_file->beg == data.temp_file->tellg());
}
BOOST_ASSERT(!data.write_mode);
data.temp_file->read(pointer, size);
}
catch (boost::filesystem::filesystem_error &e) { Abort(e); }
}
uint64_t TemporaryStorage::GetFreeBytesOnTemporaryDevice()
{
uint64_t value = -1;
try
{
boost::filesystem::path p = boost::filesystem::temp_directory_path();
boost::filesystem::space_info s = boost::filesystem::space(p);
value = s.free;
}
catch (boost::filesystem::filesystem_error &e) { Abort(e); }
return value;
}
void TemporaryStorage::CheckIfTemporaryDeviceFull()
{
boost::filesystem::path p = boost::filesystem::temp_directory_path();
boost::filesystem::space_info s = boost::filesystem::space(p);
if ((1024 * 1024) > s.free)
{
throw OSRMException("temporary device is full");
}
}
boost::filesystem::fstream::pos_type TemporaryStorage::Tell(const int slot_id)
{
boost::filesystem::fstream::pos_type position;
try
{
StreamData &data = stream_data_list[slot_id];
boost::mutex::scoped_lock lock(*data.readWriteMutex);
position = data.temp_file->tellp();
}
catch (boost::filesystem::filesystem_error &e) { Abort(e); }
return position;
}
void TemporaryStorage::Abort(const boost::filesystem::filesystem_error &e)
{
RemoveAll();
throw OSRMException(e.what());
}


@ -0,0 +1,96 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TEMPORARYSTORAGE_H_
#define TEMPORARYSTORAGE_H_
#include "../Util/BoostFileSystemFix.h"
#include "../Util/OSRMException.h"
#include "../Util/SimpleLogger.h"
#include "../typedefs.h"
#include <boost/assert.hpp>
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/thread/mutex.hpp>
#include <cstdint>
#include <vector>
#include <fstream>
#include <memory>
struct StreamData
{
bool write_mode;
boost::filesystem::path temp_path;
std::shared_ptr<boost::filesystem::fstream> temp_file;
std::shared_ptr<boost::mutex> readWriteMutex;
std::vector<char> buffer;
StreamData();
};
// This class implements a singleton file storage for temporary data.
// Temporary slots can be accessed by other objects through an int handle.
// On destruction every slot gets deallocated.
//
// Access is sequential, which means that there is no random access:
// data is written in a first phase and re-read in a second.
static boost::filesystem::path temp_directory;
static std::string TemporaryFilePattern("OSRM-%%%%-%%%%-%%%%");
class TemporaryStorage
{
public:
static TemporaryStorage &GetInstance();
virtual ~TemporaryStorage();
int AllocateSlot();
void DeallocateSlot(const int slot_id);
void WriteToSlot(const int slot_id, char *pointer, const std::size_t size);
void ReadFromSlot(const int slot_id, char *pointer, const std::size_t size);
// returns the number of free bytes
uint64_t GetFreeBytesOnTemporaryDevice();
boost::filesystem::fstream::pos_type Tell(const int slot_id);
void RemoveAll();
private:
TemporaryStorage();
TemporaryStorage(TemporaryStorage const &) {};
TemporaryStorage &operator=(TemporaryStorage const &) { return *this; }
void Abort(const boost::filesystem::filesystem_error &e);
void CheckIfTemporaryDeviceFull();
// vector of file streams that is used to store temporary data
boost::mutex mutex;
std::vector<StreamData> stream_data_list;
};
#endif /* TEMPORARYSTORAGE_H_ */
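// Illustrative usage sketch of the write-then-read discipline described in the
// class comment above; the function name and payload are made up.
inline void TemporaryStorageExample()
{
    TemporaryStorage &storage = TemporaryStorage::GetInstance();
    const int slot_id = storage.AllocateSlot();

    // phase 1: write sequentially
    std::vector<unsigned> values = {1, 2, 3, 4};
    unsigned count = static_cast<unsigned>(values.size());
    storage.WriteToSlot(slot_id, (char *)&count, sizeof(unsigned));
    storage.WriteToSlot(slot_id, (char *)values.data(), count * sizeof(unsigned));

    // phase 2: read everything back in the same order; writing is not allowed anymore
    unsigned read_count = 0;
    storage.ReadFromSlot(slot_id, (char *)&read_count, sizeof(unsigned));
    std::vector<unsigned> read_values(read_count);
    storage.ReadFromSlot(slot_id, (char *)read_values.data(), read_count * sizeof(unsigned));

    storage.DeallocateSlot(slot_id);
}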

DataStructures/BinaryHeap.h

@ -0,0 +1,289 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef BINARY_HEAP_H
#define BINARY_HEAP_H
#include <boost/assert.hpp>
#include <algorithm>
#include <cstring> // memset
#include <limits>
#include <map>
#include <type_traits>
#include <unordered_map>
#include <vector>
template <typename NodeID, typename Key> class ArrayStorage
{
public:
explicit ArrayStorage(size_t size) : positions(new Key[size])
{
memset(positions, 0, size * sizeof(Key));
}
~ArrayStorage() { delete[] positions; }
Key &operator[](NodeID node) { return positions[node]; }
void Clear() {}
private:
Key *positions;
};
template <typename NodeID, typename Key> class MapStorage
{
public:
explicit MapStorage(size_t) {}
Key &operator[](NodeID node) { return nodes[node]; }
void Clear() { nodes.clear(); }
private:
std::map<NodeID, Key> nodes;
};
template <typename NodeID, typename Key> class UnorderedMapStorage
{
public:
explicit UnorderedMapStorage(size_t) { nodes.rehash(1000); }
Key &operator[](const NodeID node) { return nodes[node]; }
Key const &operator[](const NodeID node) const
{
auto iter = nodes.find(node);
return iter->second;
}
void Clear() { nodes.clear(); }
private:
std::unordered_map<NodeID, Key> nodes;
};
template <typename NodeID,
typename Key,
typename Weight,
typename Data,
typename IndexStorage = ArrayStorage<NodeID, NodeID>>
class BinaryHeap
{
private:
BinaryHeap(const BinaryHeap &right);
void operator=(const BinaryHeap &right);
public:
typedef Weight WeightType;
typedef Data DataType;
explicit BinaryHeap(size_t maxID) : node_index(maxID) { Clear(); }
void Clear()
{
heap.resize(1);
inserted_nodes.clear();
heap[0].weight = std::numeric_limits<Weight>::min();
node_index.Clear();
}
std::size_t Size() const { return (heap.size() - 1); }
bool Empty() const { return 0 == Size(); }
void Insert(NodeID node, Weight weight, const Data &data)
{
HeapElement element;
element.index = static_cast<NodeID>(inserted_nodes.size());
element.weight = weight;
const Key key = static_cast<Key>(heap.size());
heap.emplace_back(element);
inserted_nodes.emplace_back(node, key, weight, data);
node_index[node] = element.index;
Upheap(key);
CheckHeap();
}
Data &GetData(NodeID node)
{
const Key index = node_index[node];
return inserted_nodes[index].data;
}
Data const &GetData(NodeID node) const
{
const Key index = node_index[node];
return inserted_nodes[index].data;
}
Weight &GetKey(NodeID node)
{
const Key index = node_index[node];
return inserted_nodes[index].weight;
}
bool WasRemoved(const NodeID node)
{
BOOST_ASSERT(WasInserted(node));
const Key index = node_index[node];
return inserted_nodes[index].key == 0;
}
bool WasInserted(const NodeID node)
{
const Key index = node_index[node];
if (index >= static_cast<Key>(inserted_nodes.size()))
{
return false;
}
return inserted_nodes[index].node == node;
}
NodeID Min() const
{
BOOST_ASSERT(heap.size() > 1);
return inserted_nodes[heap[1].index].node;
}
NodeID DeleteMin()
{
BOOST_ASSERT(heap.size() > 1);
const Key removedIndex = heap[1].index;
heap[1] = heap[heap.size() - 1];
heap.pop_back();
if (heap.size() > 1)
{
Downheap(1);
}
inserted_nodes[removedIndex].key = 0;
CheckHeap();
return inserted_nodes[removedIndex].node;
}
void DeleteAll()
{
auto iend = heap.end();
for (typename std::vector<HeapElement>::iterator i = heap.begin() + 1; i != iend; ++i)
{
inserted_nodes[i->index].key = 0;
}
heap.resize(1);
heap[0].weight = (std::numeric_limits<Weight>::min)();
}
void DecreaseKey(NodeID node, Weight weight)
{
BOOST_ASSERT(std::numeric_limits<NodeID>::max() != node);
const Key &index = node_index[node];
Key &key = inserted_nodes[index].key;
BOOST_ASSERT(key >= 0);
inserted_nodes[index].weight = weight;
heap[key].weight = weight;
Upheap(key);
CheckHeap();
}
private:
class HeapNode
{
public:
HeapNode(NodeID n, Key k, Weight w, Data d) : node(n), key(k), weight(w), data(d) {}
NodeID node;
Key key;
Weight weight;
Data data;
};
struct HeapElement
{
Key index;
Weight weight;
};
std::vector<HeapNode> inserted_nodes;
std::vector<HeapElement> heap;
IndexStorage node_index;
void Downheap(Key key)
{
const Key droppingIndex = heap[key].index;
const Weight weight = heap[key].weight;
Key nextKey = key << 1;
while (nextKey < static_cast<Key>(heap.size()))
{
const Key nextKeyOther = nextKey + 1;
if ((nextKeyOther < static_cast<Key>(heap.size())) &&
(heap[nextKey].weight > heap[nextKeyOther].weight))
{
nextKey = nextKeyOther;
}
if (weight <= heap[nextKey].weight)
{
break;
}
heap[key] = heap[nextKey];
inserted_nodes[heap[key].index].key = key;
key = nextKey;
nextKey <<= 1;
}
heap[key].index = droppingIndex;
heap[key].weight = weight;
inserted_nodes[droppingIndex].key = key;
}
void Upheap(Key key)
{
const Key risingIndex = heap[key].index;
const Weight weight = heap[key].weight;
Key nextKey = key >> 1;
while (heap[nextKey].weight > weight)
{
BOOST_ASSERT(nextKey != 0);
heap[key] = heap[nextKey];
inserted_nodes[heap[key].index].key = key;
key = nextKey;
nextKey >>= 1;
}
heap[key].index = risingIndex;
heap[key].weight = weight;
inserted_nodes[risingIndex].key = key;
}
void CheckHeap()
{
#ifndef NDEBUG
for (Key i = 2; i < (Key)heap.size(); ++i)
{
BOOST_ASSERT(heap[i].weight >= heap[i >> 1].weight);
}
#endif
}
};
#endif // BINARY_HEAP_H
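// Illustrative usage sketch: the heap as a Dijkstra-style priority queue keyed
// by node ID, with an int weight and the parent node stored as Data. The
// UnorderedMapStorage backend avoids allocating an array sized for all node
// IDs. The IDs and weights below are made up.
inline void BinaryHeapExample()
{
    typedef BinaryHeap<unsigned, unsigned, int, unsigned,
                       UnorderedMapStorage<unsigned, unsigned>> ExampleHeap;
    ExampleHeap heap(/*maxID*/ 100);

    heap.Insert(/*node*/ 5, /*weight*/ 42, /*parent*/ 5);
    heap.Insert(7, 23, 5);
    heap.DecreaseKey(5, 17); // node 5 now precedes node 7

    while (!heap.Empty())
    {
        const unsigned node = heap.DeleteMin(); // returns 5, then 7
        const unsigned parent = heap.GetData(node);
        (void)node;
        (void)parent;
    }
}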


@ -0,0 +1,88 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CONCURRENT_QUEUE_H
#define CONCURRENT_QUEUE_H
#include "../typedefs.h"
#include <boost/circular_buffer.hpp>
#include <condition_variable>
#include <mutex>
template <typename Data> class ConcurrentQueue
{
public:
explicit ConcurrentQueue(const size_t max_size) : m_internal_queue(max_size) {}
inline void push(const Data &data)
{
std::unique_lock<std::mutex> lock(m_mutex);
m_not_full.wait(lock,
[this]
{ return m_internal_queue.size() < m_internal_queue.capacity(); });
m_internal_queue.push_back(data);
        lock.unlock(); // release via the lock object so its destructor does not unlock twice
m_not_empty.notify_one();
}
inline bool empty() const { return m_internal_queue.empty(); }
inline void wait_and_pop(Data &popped_value)
{
std::unique_lock<std::mutex> lock(m_mutex);
m_not_empty.wait(lock,
[this]
{ return !m_internal_queue.empty(); });
popped_value = m_internal_queue.front();
m_internal_queue.pop_front();
        lock.unlock();
m_not_full.notify_one();
}
inline bool try_pop(Data &popped_value)
{
std::unique_lock<std::mutex> lock(m_mutex);
if (m_internal_queue.empty())
{
return false;
}
popped_value = m_internal_queue.front();
m_internal_queue.pop_front();
        lock.unlock();
m_not_full.notify_one();
return true;
}
private:
boost::circular_buffer<Data> m_internal_queue;
std::mutex m_mutex;
std::condition_variable m_not_empty;
std::condition_variable m_not_full;
};
#endif // CONCURRENT_QUEUE_H
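// Illustrative usage sketch: one producer and one consumer thread sharing a
// bounded queue of 16 elements. <thread> is needed for the example only, not
// by the header itself; the sentinel protocol is an assumption for this sketch.
#include <thread>

inline void ConcurrentQueueExample()
{
    ConcurrentQueue<int> queue(16);

    std::thread producer([&queue]
    {
        for (int i = 0; i < 100; ++i)
        {
            queue.push(i); // blocks while the circular buffer is full
        }
        queue.push(-1); // sentinel: tells the consumer to stop
    });

    std::thread consumer([&queue]
    {
        int value = 0;
        do
        {
            queue.wait_and_pop(value); // blocks while the buffer is empty
        } while (-1 != value);
    });

    producer.join();
    consumer.join();
}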


@ -0,0 +1,380 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <osrm/Coordinate.h>
#include "../Util/MercatorUtil.h"
#include "../Util/SimpleLogger.h"
#include "../Util/StringUtil.h"
#include "../Util/TrigonometryTables.h"
#include <boost/assert.hpp>
#ifndef NDEBUG
#include <bitset>
#endif
#include <cmath>
#include <iostream>
#include <limits>
FixedPointCoordinate::FixedPointCoordinate()
: lat(std::numeric_limits<int>::min()), lon(std::numeric_limits<int>::min())
{
}
FixedPointCoordinate::FixedPointCoordinate(int lat, int lon) : lat(lat), lon(lon)
{
#ifndef NDEBUG
if (0 != (std::abs(lat) >> 30))
{
std::bitset<32> y(lat);
SimpleLogger().Write(logDEBUG) << "broken lat: " << lat << ", bits: " << y;
}
if (0 != (std::abs(lon) >> 30))
{
std::bitset<32> x(lon);
SimpleLogger().Write(logDEBUG) << "broken lon: " << lon << ", bits: " << x;
}
#endif
}
void FixedPointCoordinate::Reset()
{
lat = std::numeric_limits<int>::min();
lon = std::numeric_limits<int>::min();
}
bool FixedPointCoordinate::isSet() const
{
return (std::numeric_limits<int>::min() != lat) && (std::numeric_limits<int>::min() != lon);
}
bool FixedPointCoordinate::isValid() const
{
if (lat > 90 * COORDINATE_PRECISION || lat < -90 * COORDINATE_PRECISION ||
lon > 180 * COORDINATE_PRECISION || lon < -180 * COORDINATE_PRECISION)
{
return false;
}
return true;
}
bool FixedPointCoordinate::operator==(const FixedPointCoordinate &other) const
{
return lat == other.lat && lon == other.lon;
}
double FixedPointCoordinate::ApproximateDistance(const int lat1,
const int lon1,
const int lat2,
const int lon2)
{
BOOST_ASSERT(lat1 != std::numeric_limits<int>::min());
BOOST_ASSERT(lon1 != std::numeric_limits<int>::min());
BOOST_ASSERT(lat2 != std::numeric_limits<int>::min());
BOOST_ASSERT(lon2 != std::numeric_limits<int>::min());
double RAD = 0.017453292519943295769236907684886;
double lt1 = lat1 / COORDINATE_PRECISION;
double ln1 = lon1 / COORDINATE_PRECISION;
double lt2 = lat2 / COORDINATE_PRECISION;
double ln2 = lon2 / COORDINATE_PRECISION;
double dlat1 = lt1 * (RAD);
double dlong1 = ln1 * (RAD);
double dlat2 = lt2 * (RAD);
double dlong2 = ln2 * (RAD);
double dLong = dlong1 - dlong2;
double dLat = dlat1 - dlat2;
double aHarv = pow(sin(dLat / 2.0), 2.0) + cos(dlat1) * cos(dlat2) * pow(sin(dLong / 2.), 2);
double cHarv = 2. * atan2(sqrt(aHarv), sqrt(1.0 - aHarv));
// earth radius varies between 6,356.750-6,378.135 km (3,949.901-3,963.189mi)
// The IUGG value for the equatorial radius is 6378.137 km (3963.19 miles)
const double earth = 6372797.560856;
return earth * cHarv;
}
double FixedPointCoordinate::ApproximateDistance(const FixedPointCoordinate &c1,
const FixedPointCoordinate &c2)
{
return ApproximateDistance(c1.lat, c1.lon, c2.lat, c2.lon);
}
float FixedPointCoordinate::ApproximateEuclideanDistance(const FixedPointCoordinate &c1,
const FixedPointCoordinate &c2)
{
return ApproximateEuclideanDistance(c1.lat, c1.lon, c2.lat, c2.lon);
}
float FixedPointCoordinate::ApproximateEuclideanDistance(const int lat1,
const int lon1,
const int lat2,
const int lon2)
{
BOOST_ASSERT(lat1 != std::numeric_limits<int>::min());
BOOST_ASSERT(lon1 != std::numeric_limits<int>::min());
BOOST_ASSERT(lat2 != std::numeric_limits<int>::min());
BOOST_ASSERT(lon2 != std::numeric_limits<int>::min());
const float RAD = 0.017453292519943295769236907684886;
const float float_lat1 = (lat1 / COORDINATE_PRECISION) * RAD;
const float float_lon1 = (lon1 / COORDINATE_PRECISION) * RAD;
const float float_lat2 = (lat2 / COORDINATE_PRECISION) * RAD;
const float float_lon2 = (lon2 / COORDINATE_PRECISION) * RAD;
const float x = (float_lon2 - float_lon1) * cos((float_lat1 + float_lat2) / 2.);
const float y = (float_lat2 - float_lat1);
const float earth_radius = 6372797.560856;
return sqrt(x * x + y * y) * earth_radius;
}
float FixedPointCoordinate::ComputePerpendicularDistance(const FixedPointCoordinate &point,
const FixedPointCoordinate &segA,
const FixedPointCoordinate &segB)
{
const float x = lat2y(point.lat / COORDINATE_PRECISION);
const float y = point.lon / COORDINATE_PRECISION;
const float a = lat2y(segA.lat / COORDINATE_PRECISION);
const float b = segA.lon / COORDINATE_PRECISION;
const float c = lat2y(segB.lat / COORDINATE_PRECISION);
const float d = segB.lon / COORDINATE_PRECISION;
float p, q, nY;
if (std::abs(a - c) > std::numeric_limits<float>::epsilon())
{
const float m = (d - b) / (c - a); // slope
// Projection of (x,y) on line joining (a,b) and (c,d)
p = ((x + (m * y)) + (m * m * a - m * b)) / (1. + m * m);
q = b + m * (p - a);
}
else
{
p = c;
q = y;
}
nY = (d * p - c * q) / (a * d - b * c);
// discretize the result to coordinate precision. it's a hack!
if (std::abs(nY) < (1. / COORDINATE_PRECISION))
{
nY = 0.;
}
float r = (p - nY * a) / c;
if (std::isnan(r))
{
r = ((segB.lat == point.lat) && (segB.lon == point.lon)) ? 1. : 0.;
}
else if (std::abs(r) <= std::numeric_limits<float>::epsilon())
{
r = 0.;
}
else if (std::abs(r - 1.) <= std::numeric_limits<float>::epsilon())
{
r = 1.;
}
FixedPointCoordinate nearest_location;
BOOST_ASSERT(!std::isnan(r));
if (r <= 0.)
{ // point is "left" of edge
nearest_location.lat = segA.lat;
nearest_location.lon = segA.lon;
}
else if (r >= 1.)
{ // point is "right" of edge
nearest_location.lat = segB.lat;
nearest_location.lon = segB.lon;
}
else
{ // point lies in between
nearest_location.lat = y2lat(p) * COORDINATE_PRECISION;
nearest_location.lon = q * COORDINATE_PRECISION;
}
BOOST_ASSERT(nearest_location.isValid());
const float approximated_distance =
FixedPointCoordinate::ApproximateEuclideanDistance(point, nearest_location);
BOOST_ASSERT(0. <= approximated_distance);
return approximated_distance;
}
float FixedPointCoordinate::ComputePerpendicularDistance(const FixedPointCoordinate &coord_a,
const FixedPointCoordinate &coord_b,
const FixedPointCoordinate &query_location,
FixedPointCoordinate &nearest_location,
float &r)
{
BOOST_ASSERT(query_location.isValid());
const float x = lat2y(query_location.lat / COORDINATE_PRECISION);
const float y = query_location.lon / COORDINATE_PRECISION;
const float a = lat2y(coord_a.lat / COORDINATE_PRECISION);
const float b = coord_a.lon / COORDINATE_PRECISION;
const float c = lat2y(coord_b.lat / COORDINATE_PRECISION);
const float d = coord_b.lon / COORDINATE_PRECISION;
float p, q /*,mX*/, nY;
if (std::abs(a - c) > std::numeric_limits<float>::epsilon())
{
const float m = (d - b) / (c - a); // slope
// Projection of (x,y) on line joining (a,b) and (c,d)
p = ((x + (m * y)) + (m * m * a - m * b)) / (1. + m * m);
q = b + m * (p - a);
}
else
{
p = c;
q = y;
}
nY = (d * p - c * q) / (a * d - b * c);
// discretize the result to coordinate precision. it's a hack!
if (std::abs(nY) < (1. / COORDINATE_PRECISION))
{
nY = 0.;
}
    r = (p - nY * a) / c; // These values are actually n/(m+n) and m/(m+n); we need
                          // not calculate the explicit values of m and n as we
                          // are just interested in the ratio
if (std::isnan(r))
{
r = ((coord_b.lat == query_location.lat) && (coord_b.lon == query_location.lon)) ? 1. : 0.;
}
else if (std::abs(r) <= std::numeric_limits<float>::epsilon())
{
r = 0.;
}
else if (std::abs(r - 1.) <= std::numeric_limits<float>::epsilon())
{
r = 1.;
}
BOOST_ASSERT(!std::isnan(r));
if (r <= 0.)
{
nearest_location = coord_a;
}
else if (r >= 1.)
{
nearest_location = coord_b;
}
else
{
// point lies in between
nearest_location.lat = y2lat(p) * COORDINATE_PRECISION;
nearest_location.lon = q * COORDINATE_PRECISION;
}
BOOST_ASSERT(nearest_location.isValid());
// TODO: Replace with euclidean approximation when k-NN search is done
// const float approximated_distance = FixedPointCoordinate::ApproximateEuclideanDistance(
const float approximated_distance =
FixedPointCoordinate::ApproximateEuclideanDistance(query_location, nearest_location);
BOOST_ASSERT(0. <= approximated_distance);
return approximated_distance;
}
void FixedPointCoordinate::convertInternalLatLonToString(const int value, std::string &output)
{
char buffer[12];
buffer[11] = 0; // zero termination
output = printInt<11, 6>(buffer, value);
}
void FixedPointCoordinate::convertInternalCoordinateToString(const FixedPointCoordinate &coord,
std::string &output)
{
std::string tmp;
tmp.reserve(23);
convertInternalLatLonToString(coord.lon, tmp);
output = tmp;
output += ",";
convertInternalLatLonToString(coord.lat, tmp);
output += tmp;
}
void
FixedPointCoordinate::convertInternalReversedCoordinateToString(const FixedPointCoordinate &coord,
std::string &output)
{
std::string tmp;
tmp.reserve(23);
convertInternalLatLonToString(coord.lat, tmp);
output = tmp;
output += ",";
convertInternalLatLonToString(coord.lon, tmp);
output += tmp;
}
void FixedPointCoordinate::Output(std::ostream &out) const
{
out << "(" << lat / COORDINATE_PRECISION << "," << lon / COORDINATE_PRECISION << ")";
}
float FixedPointCoordinate::GetBearing(const FixedPointCoordinate &A, const FixedPointCoordinate &B)
{
const float delta_long = DegreeToRadian(B.lon / COORDINATE_PRECISION - A.lon / COORDINATE_PRECISION);
const float lat1 = DegreeToRadian(A.lat / COORDINATE_PRECISION);
const float lat2 = DegreeToRadian(B.lat / COORDINATE_PRECISION);
const float y = sin(delta_long) * cos(lat2);
const float x = cos(lat1) * sin(lat2) - sin(lat1) * cos(lat2) * cos(delta_long);
float result = RadianToDegree(std::atan2(y, x));
while (result < 0.f)
{
result += 360.f;
}
while (result >= 360.f)
{
result -= 360.f;
}
return result;
}
float FixedPointCoordinate::GetBearing(const FixedPointCoordinate &other) const
{
const float delta_long = DegreeToRadian(lon / COORDINATE_PRECISION - other.lon / COORDINATE_PRECISION);
const float lat1 = DegreeToRadian(other.lat / COORDINATE_PRECISION);
const float lat2 = DegreeToRadian(lat / COORDINATE_PRECISION);
const float y = std::sin(delta_long) * std::cos(lat2);
const float x = std::cos(lat1) * std::sin(lat2) - std::sin(lat1) * std::cos(lat2) * std::cos(delta_long);
float result = RadianToDegree(std::atan2(y, x));
while (result < 0.f)
{
result += 360.f;
}
while (result >= 360.f)
{
result -= 360.f;
}
return result;
}
float FixedPointCoordinate::DegreeToRadian(const float degree)
{
return degree * (M_PI / 180.f);
}
float FixedPointCoordinate::RadianToDegree(const float radian)
{
return radian * (180.f / M_PI);
}
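// Illustrative usage sketch: coordinates are fixed-point integers, i.e. degrees
// multiplied by COORDINATE_PRECISION. The two cities and the expected values in
// the comments are rough figures for illustration only.
inline void FixedPointCoordinateExample()
{
    const FixedPointCoordinate berlin(static_cast<int>(52.52 * COORDINATE_PRECISION),
                                      static_cast<int>(13.40 * COORDINATE_PRECISION));
    const FixedPointCoordinate munich(static_cast<int>(48.14 * COORDINATE_PRECISION),
                                      static_cast<int>(11.58 * COORDINATE_PRECISION));

    const double distance_in_meters = FixedPointCoordinate::ApproximateDistance(berlin, munich);
    const float bearing_in_degrees = FixedPointCoordinate::GetBearing(berlin, munich);
    (void)distance_in_meters; // roughly 500 km
    (void)bearing_in_degrees; // roughly south-south-west, i.e. around 195 degrees
}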


@ -0,0 +1,401 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef DEALLOCATINGVECTOR_H_
#define DEALLOCATINGVECTOR_H_
#include <boost/assert.hpp>
#include <cstring>
#include <utility>
#include <vector>
template <typename ElementT,
std::size_t bucketSizeC = 8388608 / sizeof(ElementT),
bool DeallocateC = false>
class DeallocatingVectorIterator : public std::iterator<std::random_access_iterator_tag, ElementT>
{
protected:
class DeallocatingVectorIteratorState
{
private:
// make constructors explicit, so we do not mix random access and deallocation iterators.
DeallocatingVectorIteratorState();
public:
explicit DeallocatingVectorIteratorState(const DeallocatingVectorIteratorState &r)
: index(r.index), bucket_list(r.bucket_list)
{
}
explicit DeallocatingVectorIteratorState(const std::size_t idx,
std::vector<ElementT *> &input_list)
: index(idx), bucket_list(input_list)
{
}
std::size_t index;
std::vector<ElementT *> &bucket_list;
inline bool operator!=(const DeallocatingVectorIteratorState &other)
{
return index != other.index;
}
inline bool operator==(const DeallocatingVectorIteratorState &other)
{
return index == other.index;
}
bool operator<(const DeallocatingVectorIteratorState &other) const
{
return index < other.index;
}
bool operator>(const DeallocatingVectorIteratorState &other) const
{
return index > other.index;
}
bool operator>=(const DeallocatingVectorIteratorState &other) const
{
return index >= other.index;
}
// This is a hack to make assignment operator possible with reference member
inline DeallocatingVectorIteratorState &operator=(const DeallocatingVectorIteratorState &a)
{
if (this != &a)
{
this->DeallocatingVectorIteratorState::
~DeallocatingVectorIteratorState(); // explicit non-virtual destructor
new (this) DeallocatingVectorIteratorState(a); // placement new
}
return *this;
}
};
DeallocatingVectorIteratorState current_state;
public:
typedef std::random_access_iterator_tag iterator_category;
typedef typename std::iterator<std::random_access_iterator_tag, ElementT>::value_type
value_type;
typedef typename std::iterator<std::random_access_iterator_tag, ElementT>::difference_type
difference_type;
typedef typename std::iterator<std::random_access_iterator_tag, ElementT>::reference reference;
typedef typename std::iterator<std::random_access_iterator_tag, ElementT>::pointer pointer;
DeallocatingVectorIterator() {}
template <typename T2>
explicit DeallocatingVectorIterator(const DeallocatingVectorIterator<T2> &r)
: current_state(r.current_state)
{
}
DeallocatingVectorIterator(std::size_t idx, std::vector<ElementT *> &input_list)
: current_state(idx, input_list)
{
}
explicit DeallocatingVectorIterator(const DeallocatingVectorIteratorState &r) : current_state(r) {}
template <typename T2>
DeallocatingVectorIterator &operator=(const DeallocatingVectorIterator<T2> &r)
{
if (DeallocateC)
{
BOOST_ASSERT(false);
}
current_state = r.current_state;
return *this;
}
inline DeallocatingVectorIterator &operator++()
{ // prefix
++current_state.index;
return *this;
}
inline DeallocatingVectorIterator &operator--()
{ // prefix
if (DeallocateC)
{
BOOST_ASSERT(false);
}
--current_state.index;
return *this;
}
inline DeallocatingVectorIterator operator++(int)
{ // postfix
DeallocatingVectorIteratorState my_state(current_state);
current_state.index++;
return DeallocatingVectorIterator(my_state);
}
inline DeallocatingVectorIterator operator--(int)
{ // postfix
if (DeallocateC)
{
BOOST_ASSERT(false);
}
DeallocatingVectorIteratorState my_state(current_state);
current_state.index--;
return DeallocatingVectorIterator(my_state);
}
inline DeallocatingVectorIterator operator+(const difference_type &n) const
{
DeallocatingVectorIteratorState my_state(current_state);
my_state.index += n;
return DeallocatingVectorIterator(my_state);
}
inline DeallocatingVectorIterator &operator+=(const difference_type &n)
{
current_state.index += n;
return *this;
}
inline DeallocatingVectorIterator operator-(const difference_type &n) const
{
if (DeallocateC)
{
BOOST_ASSERT(false);
}
DeallocatingVectorIteratorState my_state(current_state);
my_state.index -= n;
return DeallocatingVectorIterator(my_state);
}
inline DeallocatingVectorIterator &operator-=(const difference_type &n) const
{
if (DeallocateC)
{
BOOST_ASSERT(false);
}
current_state.index -= n;
return *this;
}
inline reference operator*() const
{
std::size_t current_bucket = current_state.index / bucketSizeC;
std::size_t current_index = current_state.index % bucketSizeC;
return (current_state.bucket_list[current_bucket][current_index]);
}
inline pointer operator->() const
{
std::size_t current_bucket = current_state.index / bucketSizeC;
std::size_t current_index = current_state.index % bucketSizeC;
return &(current_state.bucket_list[current_bucket][current_index]);
}
inline bool operator!=(const DeallocatingVectorIterator &other)
{
return current_state != other.current_state;
}
inline bool operator==(const DeallocatingVectorIterator &other)
{
return current_state == other.current_state;
}
inline bool operator<(const DeallocatingVectorIterator &other) const
{
return current_state < other.current_state;
}
inline bool operator>(const DeallocatingVectorIterator &other) const
{
return current_state > other.current_state;
}
inline bool operator>=(const DeallocatingVectorIterator &other) const
{
return current_state >= other.current_state;
}
difference_type operator-(const DeallocatingVectorIterator &other)
{
if (DeallocateC)
{
BOOST_ASSERT(false);
}
return current_state.index - other.current_state.index;
}
};
template <typename ElementT, std::size_t bucketSizeC = 8388608 / sizeof(ElementT)>
class DeallocatingVector
{
private:
std::size_t current_size;
std::vector<ElementT *> bucket_list;
public:
typedef ElementT value_type;
typedef DeallocatingVectorIterator<ElementT, bucketSizeC, false> iterator;
typedef DeallocatingVectorIterator<ElementT, bucketSizeC, false> const_iterator;
// this iterator deallocates all buckets that have been visited. Iterators to visited objects
// become invalid.
typedef DeallocatingVectorIterator<ElementT, bucketSizeC, true> deallocation_iterator;
DeallocatingVector() : current_size(0)
{
// initial bucket
bucket_list.emplace_back(new ElementT[bucketSizeC]);
}
~DeallocatingVector() { clear(); }
inline void swap(DeallocatingVector<ElementT, bucketSizeC> &other)
{
std::swap(current_size, other.current_size);
bucket_list.swap(other.bucket_list);
}
inline void clear()
{
// Delete[]'ing ptr's to all Buckets
for (unsigned i = 0; i < bucket_list.size(); ++i)
{
if (nullptr != bucket_list[i])
{
delete[] bucket_list[i];
bucket_list[i] = nullptr;
}
}
// Removing all ptrs from vector
std::vector<ElementT *>().swap(bucket_list);
current_size = 0;
}
inline void push_back(const ElementT &element)
{
const std::size_t current_capacity = capacity();
if (current_size == current_capacity)
{
bucket_list.push_back(new ElementT[bucketSizeC]);
}
std::size_t current_index = size() % bucketSizeC;
bucket_list.back()[current_index] = element;
++current_size;
}
inline void emplace_back(ElementT &&element)
{
const std::size_t current_capacity = capacity();
if (current_size == current_capacity)
{
bucket_list.push_back(new ElementT[bucketSizeC]);
}
const std::size_t current_index = size() % bucketSizeC;
bucket_list.back()[current_index] = std::move(element);
++current_size;
}
inline void reserve(const std::size_t) const
{
// don't do anything
}
inline void resize(const std::size_t new_size)
{
if (new_size > current_size)
{
while (capacity() < new_size)
{
bucket_list.push_back(new ElementT[bucketSizeC]);
}
current_size = new_size;
}
if (new_size < current_size)
{
const std::size_t number_of_necessary_buckets = 1 + (new_size / bucketSizeC);
for (unsigned i = number_of_necessary_buckets; i < bucket_list.size(); ++i)
{
delete[] bucket_list[i];
}
bucket_list.resize(number_of_necessary_buckets);
current_size = new_size;
}
}
inline std::size_t size() const { return current_size; }
inline std::size_t capacity() const { return bucket_list.size() * bucketSizeC; }
inline iterator begin() { return iterator(static_cast<std::size_t>(0), bucket_list); }
inline iterator end() { return iterator(size(), bucket_list); }
inline deallocation_iterator dbegin()
{
return deallocation_iterator(static_cast<std::size_t>(0), bucket_list);
}
inline deallocation_iterator dend() { return deallocation_iterator(size(), bucket_list); }
inline const_iterator begin() const
{
return const_iterator(static_cast<std::size_t>(0), bucket_list);
}
inline const_iterator end() const { return const_iterator(size(), bucket_list); }
inline ElementT &operator[](const std::size_t index)
{
std::size_t _bucket = index / bucketSizeC;
std::size_t _index = index % bucketSizeC;
return (bucket_list[_bucket][_index]);
}
const inline ElementT &operator[](const std::size_t index) const
{
std::size_t _bucket = index / bucketSizeC;
std::size_t _index = index % bucketSizeC;
return (bucket_list[_bucket][_index]);
}
inline ElementT &back()
{
// the last stored element lives at index current_size - 1
const std::size_t _bucket = (current_size - 1) / bucketSizeC;
const std::size_t _index = (current_size - 1) % bucketSizeC;
return (bucket_list[_bucket][_index]);
}
inline const ElementT &back() const
{
const std::size_t _bucket = (current_size - 1) / bucketSizeC;
const std::size_t _index = (current_size - 1) % bucketSizeC;
return (bucket_list[_bucket][_index]);
}
};
#endif /* DEALLOCATINGVECTOR_H_ */
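// Usage sketch (illustration only, not part of the header above). It assumes the header is
// reachable under the include path used elsewhere in this tree; element type and sizes are
// made up.
#include "../DataStructures/DeallocatingVector.h"
#include <iostream>

int main()
{
    DeallocatingVector<int> values;
    for (int i = 0; i < 1000; ++i)
    {
        values.push_back(i);
    }
    int sum = 0;
    for (DeallocatingVector<int>::iterator it = values.begin(); it != values.end(); ++it)
    {
        sum += *it;
    }
    std::cout << values.size() << " elements, sum " << sum << std::endl;
    // For large write-once/read-once data, dbegin()/dend() can be used instead; per the
    // class comment its deallocation_iterator frees each bucket after it has been visited,
    // which invalidates iterators to earlier elements.
    return 0;
}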

View File

@ -0,0 +1,281 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef DYNAMICGRAPH_H
#define DYNAMICGRAPH_H
#include "../DataStructures/DeallocatingVector.h"
#include <boost/assert.hpp>
#include <boost/range/irange.hpp>
#include <cstdint>
#include <algorithm>
#include <limits>
#include <vector>
template <typename EdgeDataT> class DynamicGraph
{
public:
typedef decltype(boost::irange(0u,0u)) EdgeRange;
typedef EdgeDataT EdgeData;
typedef unsigned NodeIterator;
typedef unsigned EdgeIterator;
class InputEdge
{
public:
NodeIterator source;
NodeIterator target;
EdgeDataT data;
bool operator<(const InputEdge &right) const
{
if (source != right.source)
return source < right.source;
return target < right.target;
}
};
// Constructs an empty graph with a given number of nodes.
explicit DynamicGraph(int32_t nodes) : m_numNodes(nodes), m_numEdges(0)
{
m_nodes.reserve(m_numNodes);
m_nodes.resize(m_numNodes);
m_edges.reserve(m_numNodes * 1.1);
m_edges.resize(m_numNodes);
}
template <class ContainerT> DynamicGraph(const int32_t nodes, const ContainerT &graph)
{
m_numNodes = nodes;
m_numEdges = (EdgeIterator)graph.size();
m_nodes.reserve(m_numNodes + 1);
m_nodes.resize(m_numNodes + 1);
EdgeIterator edge = 0;
EdgeIterator position = 0;
for (NodeIterator node = 0; node < m_numNodes; ++node)
{
EdgeIterator lastEdge = edge;
while (edge < m_numEdges && graph[edge].source == node)
{
++edge;
}
m_nodes[node].firstEdge = position;
m_nodes[node].edges = edge - lastEdge;
position += m_nodes[node].edges;
}
m_nodes.back().firstEdge = position;
m_edges.reserve(position * 1.1);
m_edges.resize(position);
edge = 0;
for (NodeIterator node = 0; node < m_numNodes; ++node)
{
for (EdgeIterator i = m_nodes[node].firstEdge,
e = m_nodes[node].firstEdge + m_nodes[node].edges;
i != e;
++i)
{
m_edges[i].target = graph[edge].target;
m_edges[i].data = graph[edge].data;
BOOST_ASSERT_MSG(graph[edge].data.distance > 0, "edge distance invalid");
++edge;
}
}
}
~DynamicGraph() {}
unsigned GetNumberOfNodes() const { return m_numNodes; }
unsigned GetNumberOfEdges() const { return m_numEdges; }
unsigned GetOutDegree(const NodeIterator n) const { return m_nodes[n].edges; }
unsigned GetDirectedOutDegree(const NodeIterator n) const
{
unsigned degree = 0;
for(EdgeIterator edge = BeginEdges(n); edge < EndEdges(n); ++edge)
{
if (GetEdgeData(edge).forward)
{
++degree;
}
}
return degree;
}
NodeIterator GetTarget(const EdgeIterator e) const { return NodeIterator(m_edges[e].target); }
void SetTarget(const EdgeIterator e, const NodeIterator n) { m_edges[e].target = n; }
EdgeDataT &GetEdgeData(const EdgeIterator e) { return m_edges[e].data; }
const EdgeDataT &GetEdgeData(const EdgeIterator e) const { return m_edges[e].data; }
EdgeIterator BeginEdges(const NodeIterator n) const
{
return EdgeIterator(m_nodes[n].firstEdge);
}
EdgeIterator EndEdges(const NodeIterator n) const
{
return EdgeIterator(m_nodes[n].firstEdge + m_nodes[n].edges);
}
EdgeRange GetAdjacentEdgeRange(const NodeIterator node) const
{
return boost::irange(BeginEdges(node), EndEdges(node));
}
// adds an edge. Invalidates edge iterators for the source node
EdgeIterator InsertEdge(const NodeIterator from, const NodeIterator to, const EdgeDataT &data)
{
Node &node = m_nodes[from];
EdgeIterator newFirstEdge = node.edges + node.firstEdge;
if (newFirstEdge >= m_edges.size() || !isDummy(newFirstEdge))
{
if (node.firstEdge != 0 && isDummy(node.firstEdge - 1))
{
node.firstEdge--;
m_edges[node.firstEdge] = m_edges[node.firstEdge + node.edges];
}
else
{
EdgeIterator newFirstEdge = (EdgeIterator)m_edges.size();
unsigned newSize = node.edges * 1.1 + 2;
EdgeIterator requiredCapacity = newSize + m_edges.size();
EdgeIterator oldCapacity = m_edges.capacity();
if (requiredCapacity >= oldCapacity)
{
m_edges.reserve(requiredCapacity * 1.1);
}
m_edges.resize(m_edges.size() + newSize);
for (EdgeIterator i = 0; i < node.edges; ++i)
{
m_edges[newFirstEdge + i] = m_edges[node.firstEdge + i];
makeDummy(node.firstEdge + i);
}
for (EdgeIterator i = node.edges + 1; i < newSize; ++i)
makeDummy(newFirstEdge + i);
node.firstEdge = newFirstEdge;
}
}
Edge &edge = m_edges[node.firstEdge + node.edges];
edge.target = to;
edge.data = data;
++m_numEdges;
++node.edges;
return EdgeIterator(node.firstEdge + node.edges);
}
// removes an edge. Invalidates edge iterators for the source node
void DeleteEdge(const NodeIterator source, const EdgeIterator e)
{
Node &node = m_nodes[source];
#pragma omp atomic
--m_numEdges;
--node.edges;
BOOST_ASSERT(std::numeric_limits<unsigned>::max() != node.edges);
const unsigned last = node.firstEdge + node.edges;
BOOST_ASSERT(std::numeric_limits<unsigned>::max() != last);
// swap with last edge
m_edges[e] = m_edges[last];
makeDummy(last);
}
// removes all edges (source,target)
int32_t DeleteEdgesTo(const NodeIterator source, const NodeIterator target)
{
int32_t deleted = 0;
for (EdgeIterator i = BeginEdges(source), iend = EndEdges(source); i < iend - deleted; ++i)
{
if (m_edges[i].target == target)
{
do
{
deleted++;
m_edges[i] = m_edges[iend - deleted];
makeDummy(iend - deleted);
} while (i < iend - deleted && m_edges[i].target == target);
}
}
#pragma omp atomic
m_numEdges -= deleted;
m_nodes[source].edges -= deleted;
return deleted;
}
// searches for a specific edge
EdgeIterator FindEdge(const NodeIterator from, const NodeIterator to) const
{
for (EdgeIterator i = BeginEdges(from), iend = EndEdges(from); i != iend; ++i)
{
if (to == m_edges[i].target)
{
return i;
}
}
return EndEdges(from);
}
protected:
bool isDummy(const EdgeIterator edge) const
{
return m_edges[edge].target == (std::numeric_limits<NodeIterator>::max)();
}
void makeDummy(const EdgeIterator edge)
{
m_edges[edge].target = (std::numeric_limits<NodeIterator>::max)();
}
struct Node
{
// index of the first edge
EdgeIterator firstEdge;
// amount of edges
unsigned edges;
};
struct Edge
{
NodeIterator target;
EdgeDataT data;
};
NodeIterator m_numNodes;
EdgeIterator m_numEdges;
std::vector<Node> m_nodes;
DeallocatingVector<Edge> m_edges;
};
#endif // DYNAMICGRAPH_H
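// Usage sketch (illustration only): building a small graph from edges sorted by
// (source, target) and then mutating it. TestEdgeData is a made-up payload; the bulk
// constructor only requires a positive 'distance' member (see the assertion above), and
// the include path is an assumption about the tree layout.
#include "../DataStructures/DynamicGraph.h"
#include <vector>

struct TestEdgeData
{
    int distance;
    bool forward;
};

int main()
{
    typedef DynamicGraph<TestEdgeData> Graph;
    std::vector<Graph::InputEdge> input_edges(2);
    input_edges[0].source = 0;
    input_edges[0].target = 1;
    input_edges[0].data = {10, true};
    input_edges[1].source = 1;
    input_edges[1].target = 2;
    input_edges[1].data = {20, true};
    Graph graph(3, input_edges);               // three nodes, two edges
    graph.InsertEdge(2, 0, {5, true});         // invalidates edge iterators of node 2
    for (auto edge : graph.GetAdjacentEdgeRange(0))
    {
        graph.GetEdgeData(edge).distance += 1; // payload is mutable in place
    }
    return graph.GetNumberOfEdges() == 3 ? 0 : 1;
}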

View File

@ -0,0 +1,90 @@
#ifndef EDGE_BASED_NODE_H
#define EDGE_BASED_NODE_H
#include "../Util/SimpleLogger.h"
#include "../typedefs.h"
#include <osrm/Coordinate.h>
#include <boost/assert.hpp>
#include <limits>
struct EdgeBasedNode
{
EdgeBasedNode() :
forward_edge_based_node_id(SPECIAL_NODEID),
reverse_edge_based_node_id(SPECIAL_NODEID),
u(SPECIAL_NODEID),
v(SPECIAL_NODEID),
name_id(0),
forward_weight(INVALID_EDGE_WEIGHT >> 1),
reverse_weight(INVALID_EDGE_WEIGHT >> 1),
forward_offset(0),
reverse_offset(0),
packed_geometry_id(SPECIAL_EDGEID),
fwd_segment_position( std::numeric_limits<unsigned short>::max() ),
is_in_tiny_cc(false)
{ }
explicit EdgeBasedNode(
NodeID forward_edge_based_node_id,
NodeID reverse_edge_based_node_id,
NodeID u,
NodeID v,
unsigned name_id,
int forward_weight,
int reverse_weight,
int forward_offset,
int reverse_offset,
unsigned packed_geometry_id,
unsigned short fwd_segment_position,
bool belongs_to_tiny_component
) :
forward_edge_based_node_id(forward_edge_based_node_id),
reverse_edge_based_node_id(reverse_edge_based_node_id),
u(u),
v(v),
name_id(name_id),
forward_weight(forward_weight),
reverse_weight(reverse_weight),
forward_offset(forward_offset),
reverse_offset(reverse_offset),
packed_geometry_id(packed_geometry_id),
fwd_segment_position(fwd_segment_position),
is_in_tiny_cc(belongs_to_tiny_component)
{
BOOST_ASSERT((forward_edge_based_node_id != SPECIAL_NODEID) ||
(reverse_edge_based_node_id != SPECIAL_NODEID));
}
static inline FixedPointCoordinate Centroid(const FixedPointCoordinate & a, const FixedPointCoordinate & b)
{
FixedPointCoordinate centroid;
//The coordinates of the midpoint are given by:
centroid.lat = (a.lat + b.lat)/2;
centroid.lon = (a.lon + b.lon)/2;
return centroid;
}
bool IsCompressed() const
{
return packed_geometry_id != SPECIAL_EDGEID;
}
NodeID forward_edge_based_node_id; // needed for edge-expanded graph
NodeID reverse_edge_based_node_id; // needed for edge-expanded graph
NodeID u; // indices into the coordinates array
NodeID v; // indices into the coordinates array
unsigned name_id; // id of the edge name
int forward_weight; // weight of the edge
int reverse_weight; // weight in the other direction (may be different)
int forward_offset; // prefix sum of the weight up the edge TODO: short must suffice
int reverse_offset; // prefix sum of the weight from the edge TODO: short must suffice
unsigned packed_geometry_id; // if set, then the edge represents a packed geometry
unsigned short fwd_segment_position; // segment id in a compressed geometry
bool is_in_tiny_cc;
};
#endif //EDGE_BASED_NODE_H
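// Small sketch (illustration only) of the Centroid helper. The coordinate values are made
// up, and the two-argument (lat, lon) constructor plus COORDINATE_PRECISION are assumed to
// come from <osrm/Coordinate.h>; adjust the include path to the local tree layout.
#include "../DataStructures/EdgeBasedNode.h"

inline FixedPointCoordinate segment_midpoint_example()
{
    const FixedPointCoordinate u(static_cast<int>(52.50 * COORDINATE_PRECISION),
                                 static_cast<int>(13.40 * COORDINATE_PRECISION));
    const FixedPointCoordinate v(static_cast<int>(52.52 * COORDINATE_PRECISION),
                                 static_cast<int>(13.42 * COORDINATE_PRECISION));
    return EdgeBasedNode::Centroid(u, v); // integer midpoint of the two fixed-point endpoints
}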

View File

@ -0,0 +1,65 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef HASH_TABLE_H
#define HASH_TABLE_H
#include <unordered_map>
template <typename Key, typename Value> class HashTable : public std::unordered_map<Key, Value>
{
private:
typedef std::unordered_map<Key, Value> super;
public:
HashTable() : super() {}
explicit HashTable(const unsigned size) : super(size) {}
inline void Add(Key const &key, Value const &value) { super::emplace(key, value); }
inline Value Find(Key const &key) const
{
auto iter = super::find(key);
if (iter == super::end())
{
return Value();
}
return iter->second;
}
inline bool Holds(Key const &key) const
{
return super::find(key) != super::end();
}
};
#endif /* HASH_TABLE_H */
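// Usage sketch (illustration only): Find() returns a default-constructed value for missing
// keys instead of throwing, so lookups are typically guarded by Holds(). The include path
// is an assumption about the tree layout.
#include "../DataStructures/HashTable.h"
#include <string>

inline unsigned maxspeed_example()
{
    HashTable<std::string, unsigned> tags;
    tags.Add("maxspeed", 50);
    tags.Add("lanes", 2);
    if (!tags.Holds("maxspeed"))
    {
        return 0; // Find("maxspeed") would also return unsigned() == 0 here
    }
    return tags.Find("maxspeed");
}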

View File

@ -0,0 +1,100 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "HilbertValue.h"
#include <osrm/Coordinate.h>
uint64_t HilbertCode::operator()(const FixedPointCoordinate &current_coordinate) const
{
unsigned location[2];
location[0] = current_coordinate.lat + (90 * COORDINATE_PRECISION);
location[1] = current_coordinate.lon + (180 * COORDINATE_PRECISION);
TransposeCoordinate(location);
return BitInterleaving(location[0], location[1]);
}
uint64_t HilbertCode::BitInterleaving(const uint32_t latitude, const uint32_t longitude) const
{
uint64_t result = 0;
for (int8_t index = 31; index >= 0; --index)
{
result |= (latitude >> index) & 1;
result <<= 1;
result |= (longitude >> index) & 1;
if (0 != index)
{
result <<= 1;
}
}
return result;
}
void HilbertCode::TransposeCoordinate(uint32_t *X) const
{
uint32_t M = 1 << (32 - 1), P, Q, t;
int i;
// Inverse undo
for (Q = M; Q > 1; Q >>= 1)
{
P = Q - 1;
for (i = 0; i < 2; ++i)
{
const bool condition = (X[i] & Q);
if (condition)
{
X[0] ^= P; // invert
}
else
{
t = (X[0] ^ X[i]) & P;
X[0] ^= t;
X[i] ^= t;
}
} // exchange
}
// Gray encode
for (i = 1; i < 2; ++i)
{
X[i] ^= X[i - 1];
}
t = 0;
for (Q = M; Q > 1; Q >>= 1)
{
const bool condition = (X[2 - 1] & Q);
if (condition)
{
t ^= Q - 1;
}
} // check if this for loop is wrong
for (i = 0; i < 2; ++i)
{
X[i] ^= t;
}
}
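// Usage sketch (illustration only): mapping a fixed-point coordinate onto the Hilbert curve,
// for example as a sort key that keeps nearby coordinates close together. The coordinate
// values, the (lat, lon) constructor and the include paths are assumptions.
#include "../DataStructures/HilbertValue.h"
#include <osrm/Coordinate.h>
#include <cstdint>

inline uint64_t hilbert_key_example()
{
    const FixedPointCoordinate coordinate(static_cast<int>(52.5 * COORDINATE_PRECISION),
                                          static_cast<int>(13.4 * COORDINATE_PRECISION));
    const HilbertCode get_hilbert_code;
    return get_hilbert_code(coordinate); // 64-bit key on the space filling curve
}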

View File

@ -1,6 +1,6 @@
/*
Copyright (c) 2017, Project OSRM contributors
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
@ -25,25 +25,25 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef OSRM_ENGINE_BEARING_HPP
#define OSRM_ENGINE_BEARING_HPP
#ifndef HILBERTVALUE_H_
#define HILBERTVALUE_H_
namespace osrm::engine
#include <cstdint>
// computes a 64 bit value that corresponds to the hilbert space filling curve
struct FixedPointCoordinate;
class HilbertCode
{
public:
uint64_t operator()(const FixedPointCoordinate &current_coordinate) const;
HilbertCode() {}
HilbertCode(const HilbertCode &) = delete;
struct Bearing
{
short bearing;
short range;
bool IsValid() const { return bearing >= 0 && bearing <= 360 && range >= 0 && range <= 180; }
private:
inline uint64_t BitInterleaving(const uint32_t a, const uint32_t b) const;
inline void TransposeCoordinate(uint32_t *X) const;
};
inline bool operator==(const Bearing lhs, const Bearing rhs)
{
return lhs.bearing == rhs.bearing && lhs.range == rhs.range;
}
inline bool operator!=(const Bearing lhs, const Bearing rhs) { return !(lhs == rhs); }
} // namespace osrm::engine
#endif
#endif /* HILBERTVALUE_H_ */

DataStructures/ImportEdge.h Normal file
View File

@ -0,0 +1,178 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef IMPORT_EDGE_H
#define IMPORT_EDGE_H
#include "../Util/OSRMException.h"
#include "../typedefs.h"
#include <boost/assert.hpp>
class NodeBasedEdge
{
public:
bool operator<(const NodeBasedEdge &e) const
{
if (source() == e.source())
{
if (target() == e.target())
{
if (weight() == e.weight())
{
return (isForward() && isBackward() && ((!e.isForward()) || (!e.isBackward())));
}
return (weight() < e.weight());
}
return (target() < e.target());
}
return (source() < e.source());
}
explicit NodeBasedEdge(NodeID s,
NodeID t,
NodeID n,
EdgeWeight w,
bool f,
bool b,
short ty,
bool ra,
bool ig,
bool ar,
bool cf,
bool is_split)
: _source(s), _target(t), _name(n), _weight(w), _type(ty), forward(f), backward(b),
_roundabout(ra), _ignoreInGrid(ig), _accessRestricted(ar), _contraFlow(cf),
is_split(is_split)
{
if (ty < 0)
{
throw OSRMException("negative edge type");
}
}
NodeID target() const { return _target; }
NodeID source() const { return _source; }
NodeID name() const { return _name; }
EdgeWeight weight() const { return _weight; }
short type() const
{
BOOST_ASSERT_MSG(_type >= 0, "type of ImportEdge invalid");
return _type;
}
bool isBackward() const { return backward; }
bool isForward() const { return forward; }
bool isLocatable() const { return _type != 14; }
bool isRoundabout() const { return _roundabout; }
bool ignoreInGrid() const { return _ignoreInGrid; }
bool isAccessRestricted() const { return _accessRestricted; }
bool isContraFlow() const { return _contraFlow; }
bool IsSplit() const { return is_split; }
// TODO: names need to be fixed.
NodeID _source;
NodeID _target;
NodeID _name;
EdgeWeight _weight;
short _type;
bool forward : 1;
bool backward : 1;
bool _roundabout : 1;
bool _ignoreInGrid : 1;
bool _accessRestricted : 1;
bool _contraFlow : 1;
bool is_split : 1;
private:
NodeBasedEdge() {}
};
class EdgeBasedEdge
{
public:
bool operator<(const EdgeBasedEdge &e) const
{
if (source() == e.source())
{
if (target() == e.target())
{
if (weight() == e.weight())
{
return (isForward() && isBackward() && ((!e.isForward()) || (!e.isBackward())));
}
return (weight() < e.weight());
}
return (target() < e.target());
}
return (source() < e.source());
}
template <class EdgeT>
explicit EdgeBasedEdge(const EdgeT &myEdge)
: m_source(myEdge.source), m_target(myEdge.target), m_edgeID(myEdge.data.via),
m_weight(myEdge.data.distance), m_forward(myEdge.data.forward),
m_backward(myEdge.data.backward)
{
}
/** Default constructor. target and weight are set to 0.*/
EdgeBasedEdge()
: m_source(0), m_target(0), m_edgeID(0), m_weight(0), m_forward(false), m_backward(false)
{
}
explicit EdgeBasedEdge(const NodeID s,
const NodeID t,
const NodeID v,
const EdgeWeight w,
const bool f,
const bool b)
: m_source(s), m_target(t), m_edgeID(v), m_weight(w), m_forward(f), m_backward(b)
{
}
NodeID target() const { return m_target; }
NodeID source() const { return m_source; }
EdgeWeight weight() const { return m_weight; }
NodeID id() const { return m_edgeID; }
bool isBackward() const { return m_backward; }
bool isForward() const { return m_forward; }
private:
NodeID m_source;
NodeID m_target;
NodeID m_edgeID;
EdgeWeight m_weight : 30;
bool m_forward : 1;
bool m_backward : 1;
};
typedef NodeBasedEdge ImportEdge;
#endif /* IMPORT_EDGE_H */
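// Construction sketch (values are made up): the twelve positional parameters of
// NodeBasedEdge are easy to transpose, so each argument is annotated with its meaning
// from the constructor above. The include path is an assumption about the tree layout.
#include "../DataStructures/ImportEdge.h"

inline NodeBasedEdge make_example_edge()
{
    return NodeBasedEdge(0,      // s: source node id
                         1,      // t: target node id
                         7,      // n: name id
                         120,    // w: weight
                         true,   // f: traversable forward
                         false,  // b: traversable backward
                         3,      // ty: edge type, must not be negative
                         false,  // ra: roundabout
                         false,  // ig: ignore in grid
                         false,  // ar: access restricted
                         false,  // cf: contra flow
                         false); // is_split
}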

View File

@ -1,6 +1,6 @@
/*
Copyright (c) 2017, Project OSRM contributors
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
@ -25,51 +25,54 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef OSRM_UPDATER_UPDATER_CONFIG_HPP
#define OSRM_UPDATER_UPDATER_CONFIG_HPP
#ifndef IMPORTNODE_H_
#define IMPORTNODE_H_
#include "storage/io_config.hpp"
#include "storage/storage_config.hpp"
#include "QueryNode.h"
#include "../DataStructures/HashTable.h"
#include <chrono>
#include <filesystem>
#include <limits>
#include <string>
namespace osrm::updater
struct ExternalMemoryNode : NodeInfo
{
struct UpdaterConfig final : storage::IOConfig
{
UpdaterConfig()
: IOConfig({".osrm.ebg",
".osrm.turn_weight_penalties",
".osrm.turn_duration_penalties",
".osrm.turn_penalties_index",
".osrm.nbg_nodes",
".osrm.ebg_nodes",
".osrm.geometry",
".osrm.fileIndex",
".osrm.properties",
".osrm.restrictions",
".osrm.enw"},
{},
{".osrm.datasource_names"}),
valid_now(0)
ExternalMemoryNode(int lat, int lon, unsigned int id, bool bollard, bool traffic_light)
: NodeInfo(lat, lon, id), bollard(bollard), trafficLight(traffic_light)
{
}
void UseDefaultOutputNames(const std::filesystem::path &base)
ExternalMemoryNode() : bollard(false), trafficLight(false) {}
static ExternalMemoryNode min_value() { return ExternalMemoryNode(0, 0, 0, false, false); }
static ExternalMemoryNode max_value()
{
IOConfig::UseDefaultOutputNames(base);
return ExternalMemoryNode(std::numeric_limits<int>::max(),
std::numeric_limits<int>::max(),
std::numeric_limits<unsigned>::max(),
false,
false);
}
double log_edge_updates_factor = 0.0;
std::time_t valid_now;
NodeID key() const { return id; }
std::vector<std::string> segment_speed_lookup_paths;
std::vector<std::string> turn_penalty_lookup_paths;
std::string tz_file_path;
bool bollard;
bool trafficLight;
};
} // namespace osrm::updater
#endif // EXTRACTOR_OPTIONS_HPP
struct ImportNode : public ExternalMemoryNode
{
HashTable<std::string, std::string> keyVals;
inline void Clear()
{
keyVals.clear();
lat = 0;
lon = 0;
id = 0;
bollard = false;
trafficLight = false;
}
};
#endif /* IMPORTNODE_H_ */

View File

@ -0,0 +1,123 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef INPUT_READER_FACTORY_H
#define INPUT_READER_FACTORY_H
#include <boost/assert.hpp>
#include <bzlib.h>
#include <libxml/xmlreader.h>
struct BZ2Context
{
FILE *file;
BZFILE *bz2;
int error;
int nUnused;
char unused[BZ_MAX_UNUSED];
};
int readFromBz2Stream(void *pointer, char *buffer, int len)
{
void *unusedTmpVoid = NULL;
char *unusedTmp = NULL;
BZ2Context *context = (BZ2Context *)pointer;
int read = 0;
while (0 == read &&
!(BZ_STREAM_END == context->error && 0 == context->nUnused && feof(context->file)))
{
read = BZ2_bzRead(&context->error, context->bz2, buffer, len);
if (BZ_OK == context->error)
{
return read;
}
else if (BZ_STREAM_END == context->error)
{
BZ2_bzReadGetUnused(&context->error, context->bz2, &unusedTmpVoid, &context->nUnused);
BOOST_ASSERT_MSG(BZ_OK == context->error, "Could not BZ2_bzReadGetUnused");
unusedTmp = (char *)unusedTmpVoid;
for (int i = 0; i < context->nUnused; i++)
{
context->unused[i] = unusedTmp[i];
}
BZ2_bzReadClose(&context->error, context->bz2);
BOOST_ASSERT_MSG(BZ_OK == context->error, "Could not BZ2_bzReadClose");
context->error = BZ_STREAM_END; // set to the stream end for next call to this function
if (0 == context->nUnused && feof(context->file))
{
return read;
}
else
{
context->bz2 = BZ2_bzReadOpen(
&context->error, context->file, 0, 0, context->unused, context->nUnused);
BOOST_ASSERT_MSG(NULL != context->bz2, "Could not open file");
}
}
else
{
BOOST_ASSERT_MSG(false, "Could not read bz2 file");
}
}
return read;
}
int closeBz2Stream(void *pointer)
{
BZ2Context *context = (BZ2Context *)pointer;
fclose(context->file);
delete context;
return 0;
}
xmlTextReaderPtr inputReaderFactory(const char *name)
{
std::string inputName(name);
if (inputName.find(".osm.bz2") != std::string::npos)
{
BZ2Context *context = new BZ2Context();
context->error = false;
// check fopen before handing the handle to bzlib, and close it if the bz2 open fails
context->file = fopen(name, "r");
if (context->file == NULL)
{
delete context;
return NULL;
}
int error;
context->bz2 =
BZ2_bzReadOpen(&error, context->file, 0, 0, context->unused, context->nUnused);
if (context->bz2 == NULL)
{
fclose(context->file);
delete context;
return NULL;
}
return xmlReaderForIO(readFromBz2Stream, closeBz2Stream, (void *)context, NULL, NULL, 0);
}
else
{
return xmlNewTextReaderFilename(name);
}
}
#endif // INPUT_READER_FACTORY_H
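// Usage sketch (illustration only): the factory transparently opens either a plain .osm
// file or a .osm.bz2 stream; the caller just drives the libxml2 reader as usual. It assumes
// this header has been included in the translation unit; the file name is made up and error
// handling is kept minimal.
inline int count_xml_nodes(const char *file_name)
{
    xmlTextReaderPtr reader = inputReaderFactory(file_name);
    if (NULL == reader)
    {
        return -1;
    }
    int node_count = 0;
    while (1 == xmlTextReaderRead(reader))
    {
        ++node_count; // inspect node types and attributes here in real code
    }
    xmlFreeTextReader(reader); // frees the reader and its underlying input
    return node_count;
}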

View File

@ -0,0 +1,237 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// based on https://svn.apache.org/repos/asf/mesos/tags/release-0.9.0-incubating-RC0/src/common/json.hpp
#ifndef JSON_CONTAINER_H
#define JSON_CONTAINER_H
#include "../Util/StringUtil.h"
#include <boost/variant.hpp>
#include <iostream>
#include <vector>
#include <string>
#include <unordered_map>
namespace JSON
{
struct String;
struct Number;
struct Object;
struct Array;
struct True;
struct False;
struct Null;
typedef boost::variant<boost::recursive_wrapper<String>,
boost::recursive_wrapper<Number>,
boost::recursive_wrapper<Object>,
boost::recursive_wrapper<Array>,
boost::recursive_wrapper<True>,
boost::recursive_wrapper<False>,
boost::recursive_wrapper<Null> > Value;
struct String
{
String() {}
String(const char *value) : value(value) {}
String(const std::string &value) : value(value) {}
std::string value;
};
struct Number
{
Number() {}
Number(double value) : value(value) {}
double value;
};
struct Object
{
std::unordered_map<std::string, Value> values;
};
struct Array
{
std::vector<Value> values;
};
struct True
{
};
struct False
{
};
struct Null
{
};
struct Renderer : boost::static_visitor<>
{
Renderer(std::ostream &_out) : out(_out) {}
void operator()(const String &string) const { out << "\"" << string.value << "\""; }
void operator()(const Number &number) const
{
out.precision(10);
out << number.value;
}
void operator()(const Object &object) const
{
out << "{";
auto iterator = object.values.begin();
while (iterator != object.values.end())
{
out << "\"" << (*iterator).first << "\":";
boost::apply_visitor(Renderer(out), (*iterator).second);
if (++iterator != object.values.end())
{
out << ",";
}
}
out << "}";
}
void operator()(const Array &array) const
{
out << "[";
std::vector<Value>::const_iterator iterator;
iterator = array.values.begin();
while (iterator != array.values.end())
{
boost::apply_visitor(Renderer(out), *iterator);
if (++iterator != array.values.end())
{
out << ",";
}
}
out << "]";
}
void operator()(const True &) const { out << "true"; }
void operator()(const False &) const { out << "false"; }
void operator()(const Null &) const { out << "null"; }
private:
std::ostream &out;
};
struct ArrayRenderer : boost::static_visitor<>
{
ArrayRenderer(std::vector<char> &_out) : out(_out) {}
void operator()(const String &string) const {
out.push_back('\"');
out.insert(out.end(), string.value.begin(), string.value.end());
out.push_back('\"');
}
void operator()(const Number &number) const
{
const std::string number_string = FixedDoubleToString(number.value);
out.insert(out.end(), number_string.begin(), number_string.end());
}
void operator()(const Object &object) const
{
out.push_back('{');
auto iterator = object.values.begin();
while (iterator != object.values.end())
{
out.push_back('\"');
out.insert(out.end(), (*iterator).first.begin(), (*iterator).first.end());
out.push_back('\"');
out.push_back(':');
boost::apply_visitor(ArrayRenderer(out), (*iterator).second);
if (++iterator != object.values.end())
{
out.push_back(',');
}
}
out.push_back('}');
}
void operator()(const Array &array) const
{
out.push_back('[');
std::vector<Value>::const_iterator iterator;
iterator = array.values.begin();
while (iterator != array.values.end())
{
boost::apply_visitor(ArrayRenderer(out), *iterator);
if (++iterator != array.values.end())
{
out.push_back(',');
}
}
out.push_back(']');
}
void operator()(const True &) const {
const std::string temp("true");
out.insert(out.end(), temp.begin(), temp.end());
}
void operator()(const False &) const {
const std::string temp("false");
out.insert(out.end(), temp.begin(), temp.end());
}
void operator()(const Null &) const {
const std::string temp("null");
out.insert(out.end(), temp.begin(), temp.end());
}
private:
std::vector<char> &out;
};
inline void render(std::ostream &out, const Object &object)
{
Value value = object;
boost::apply_visitor(Renderer(out), value);
}
inline void render(std::vector<char> &out, const Object &object)
{
Value value = object;
boost::apply_visitor(ArrayRenderer(out), value);
}
} // namespace JSON
#endif // JSON_CONTAINER_H
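// Usage sketch (illustration only): assembling a small response object and rendering it
// both to a stream and into a byte buffer. The keys and values are made up; the include
// path is an assumption about the tree layout.
#include "../DataStructures/JSONContainer.h"
#include <iostream>
#include <vector>

inline void render_example()
{
    JSON::Object reply;
    reply.values["status"] = JSON::Number(0);
    reply.values["status_message"] = JSON::String("Found route between points");
    JSON::Array street_names;
    street_names.values.push_back(JSON::String("Invalidenstrasse"));
    street_names.values.push_back(JSON::String("Chausseestrasse"));
    reply.values["street_names"] = street_names;
    JSON::render(std::cout, reply); // {"status":0,...} (key order is unspecified)
    std::vector<char> buffer;
    JSON::render(buffer, reply);    // same output appended to an in-memory buffer
}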

DataStructures/LRUCache.h Normal file
View File

@ -0,0 +1,97 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef LRUCACHE_H
#define LRUCACHE_H
#include <list>
#include <unordered_map>
template <typename KeyT, typename ValueT> class LRUCache
{
private:
struct CacheEntry
{
CacheEntry(KeyT k, ValueT v) : key(k), value(v) {}
KeyT key;
ValueT value;
};
unsigned capacity;
std::list<CacheEntry> itemsInCache;
std::unordered_map<KeyT, typename std::list<CacheEntry>::iterator> positionMap;
public:
explicit LRUCache(unsigned c) : capacity(c) {}
bool Holds(KeyT key)
{
if (positionMap.find(key) != positionMap.end())
{
return true;
}
return false;
}
void Insert(const KeyT key, ValueT &value)
{
itemsInCache.push_front(CacheEntry(key, value));
positionMap.insert(std::make_pair(key, itemsInCache.begin()));
if (itemsInCache.size() > capacity)
{
positionMap.erase(itemsInCache.back().key);
itemsInCache.pop_back();
}
}
void Insert(const KeyT key, ValueT value)
{
itemsInCache.push_front(CacheEntry(key, value));
positionMap.insert(std::make_pair(key, itemsInCache.begin()));
if (itemsInCache.size() > capacity)
{
positionMap.erase(itemsInCache.back().key);
itemsInCache.pop_back();
}
}
bool Fetch(const KeyT key, ValueT &result)
{
if (Holds(key))
{
CacheEntry e = *(positionMap.find(key)->second);
result = e.value;
// move to front
itemsInCache.splice(itemsInCache.begin(), itemsInCache, positionMap.find(key)->second);
positionMap.find(key)->second = itemsInCache.begin();
return true;
}
return false;
}
unsigned Size() const { return itemsInCache.size(); }
};
#endif // LRUCACHE_H
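// Usage sketch (illustration only). Note that the two Insert() overloads are ambiguous for
// lvalue arguments, so the sketch passes temporaries. The include path is an assumption.
#include "../DataStructures/LRUCache.h"
#include <string>

inline bool cache_example()
{
    LRUCache<int, std::string> cache(2);       // keep at most two entries
    cache.Insert(1, std::string("one"));
    cache.Insert(2, std::string("two"));
    cache.Insert(3, std::string("three"));     // evicts key 1, the least recently used
    std::string result;
    const bool found = cache.Fetch(3, result); // a hit also refreshes the entry's position
    return found && !cache.Holds(1);           // true
}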

View File

@ -0,0 +1,110 @@
#ifndef __NODE_BASED_GRAPH_H__
#define __NODE_BASED_GRAPH_H__
#include "DynamicGraph.h"
#include "ImportEdge.h"
#include <memory>
struct NodeBasedEdgeData
{
NodeBasedEdgeData()
: distance(INVALID_EDGE_WEIGHT), edgeBasedNodeID(SPECIAL_NODEID),
nameID(std::numeric_limits<unsigned>::max()), type(std::numeric_limits<short>::max()),
isAccessRestricted(false), shortcut(false), forward(false), backward(false),
roundabout(false), ignore_in_grid(false), contraFlow(false)
{
}
int distance;
unsigned edgeBasedNodeID;
unsigned nameID;
short type;
bool isAccessRestricted : 1;
bool shortcut : 1;
bool forward : 1;
bool backward : 1;
bool roundabout : 1;
bool ignore_in_grid : 1;
bool contraFlow : 1;
void SwapDirectionFlags()
{
bool temp_flag = forward;
forward = backward;
backward = temp_flag;
}
bool IsEqualTo(const NodeBasedEdgeData &other) const
{
return (forward == other.forward) && (backward == other.backward) &&
(nameID == other.nameID) && (ignore_in_grid == other.ignore_in_grid) &&
(contraFlow == other.contraFlow);
}
};
typedef DynamicGraph<NodeBasedEdgeData> NodeBasedDynamicGraph;
// Factory method to create NodeBasedDynamicGraph from ImportEdges
inline std::shared_ptr<NodeBasedDynamicGraph>
NodeBasedDynamicGraphFromImportEdges(int number_of_nodes, std::vector<ImportEdge> &input_edge_list)
{
static_assert(sizeof(NodeBasedEdgeData) == 16, "changing node based edge data size changes memory consumption");
std::sort(input_edge_list.begin(), input_edge_list.end());
// TODO: remove duplicate edges
DeallocatingVector<NodeBasedDynamicGraph::InputEdge> edges_list;
NodeBasedDynamicGraph::InputEdge edge;
for (const ImportEdge &import_edge : input_edge_list)
{
// TODO: give ImportEdge a proper c'tor to use emplace_back's below
if (!import_edge.isForward())
{
edge.source = import_edge.target();
edge.target = import_edge.source();
edge.data.backward = import_edge.isForward();
edge.data.forward = import_edge.isBackward();
}
else
{
edge.source = import_edge.source();
edge.target = import_edge.target();
edge.data.forward = import_edge.isForward();
edge.data.backward = import_edge.isBackward();
}
if (edge.source == edge.target)
{
continue;
}
edge.data.distance = (std::max)((int)import_edge.weight(), 1);
BOOST_ASSERT(edge.data.distance > 0);
edge.data.shortcut = false;
edge.data.roundabout = import_edge.isRoundabout();
edge.data.ignore_in_grid = import_edge.ignoreInGrid();
edge.data.nameID = import_edge.name();
edge.data.type = import_edge.type();
edge.data.isAccessRestricted = import_edge.isAccessRestricted();
edge.data.contraFlow = import_edge.isContraFlow();
edges_list.push_back(edge);
if (!import_edge.IsSplit())
{
using std::swap; // enable ADL
swap(edge.source, edge.target);
edge.data.SwapDirectionFlags();
edges_list.push_back(edge);
}
}
std::sort(edges_list.begin(), edges_list.end());
auto graph = std::make_shared<NodeBasedDynamicGraph>(number_of_nodes, edges_list);
edges_list.clear();
BOOST_ASSERT(0 == edges_list.size());
return graph;
}
#endif // __NODE_BASED_GRAPH_H__

View File

@ -0,0 +1,60 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef ORIGINAL_EDGE_DATA_H
#define ORIGINAL_EDGE_DATA_H
#include "TurnInstructions.h"
#include "../typedefs.h"
#include <limits>
struct OriginalEdgeData
{
explicit OriginalEdgeData(NodeID via_node,
unsigned name_id,
TurnInstruction turn_instruction,
bool compressed_geometry)
: via_node(via_node), name_id(name_id), turn_instruction(turn_instruction),
compressed_geometry(compressed_geometry)
{
}
OriginalEdgeData()
: via_node(std::numeric_limits<unsigned>::max()),
name_id(std::numeric_limits<unsigned>::max()),
turn_instruction(TurnInstruction::NoTurn), compressed_geometry(false)
{
}
NodeID via_node;
unsigned name_id;
TurnInstruction turn_instruction;
bool compressed_geometry;
};
#endif // ORIGINAL_EDGE_DATA_H

DataStructures/Percent.h Normal file
View File

@ -0,0 +1,103 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef PERCENT_H
#define PERCENT_H
#include "../Util/OpenMPWrapper.h"
#include <iostream>
class Percent
{
public:
explicit Percent(unsigned max_value, unsigned step = 5) { reinit(max_value, step); }
// Reinitializes
void reinit(unsigned max_value, unsigned step = 5)
{
m_max_value = max_value;
m_current_value = 0;
m_percent_interval = m_max_value / 100;
m_next_threshold = m_percent_interval;
m_last_percent = 0;
m_step = step;
}
// If there has been significant progress, display it.
void printStatus(unsigned current_value)
{
if (current_value >= m_next_threshold)
{
m_next_threshold += m_percent_interval;
printPercent(current_value / (double)m_max_value * 100);
}
if (current_value + 1 == m_max_value)
std::cout << " 100%" << std::endl;
}
void printIncrement()
{
#pragma omp atomic
++m_current_value;
printStatus(m_current_value);
}
void printAddition(const unsigned addition)
{
#pragma omp atomic
m_current_value += addition;
printStatus(m_current_value);
}
private:
unsigned m_current_value;
unsigned m_max_value;
unsigned m_percent_interval;
unsigned m_next_threshold;
unsigned m_last_percent;
unsigned m_step;
// Displays progress.
void printPercent(double percent)
{
while (percent >= m_last_percent + m_step)
{
m_last_percent += m_step;
if (m_last_percent % 10 == 0)
{
std::cout << " " << m_last_percent << "% ";
}
else
{
std::cout << ".";
}
std::cout.flush();
}
}
};
#endif // PERCENT_H
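// Usage sketch (illustration only): wrapping a long loop to emit coarse progress output,
// a dot every 5% and the percentage at every 10%. It is most useful for inputs well above
// one hundred items; the actual work is elided. The include path is an assumption.
#include "../DataStructures/Percent.h"
#include <vector>

inline void process_all(const std::vector<int> &work_items)
{
    Percent progress(static_cast<unsigned>(work_items.size()));
    for (unsigned i = 0; i < work_items.size(); ++i)
    {
        // ... do the actual work for work_items[i] here ...
        progress.printStatus(i);
    }
}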

View File

@ -0,0 +1,180 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef PHANTOM_NODES_H
#define PHANTOM_NODES_H
#include <osrm/Coordinate.h>
#include "../Util/SimpleLogger.h"
#include "../typedefs.h"
struct PhantomNode
{
PhantomNode() :
forward_node_id(SPECIAL_NODEID),
reverse_node_id(SPECIAL_NODEID),
name_id(std::numeric_limits<unsigned>::max()),
forward_weight(INVALID_EDGE_WEIGHT),
reverse_weight(INVALID_EDGE_WEIGHT),
forward_offset(0),
reverse_offset(0),
packed_geometry_id(SPECIAL_EDGEID),
fwd_segment_position(0)
{ }
NodeID forward_node_id;
NodeID reverse_node_id;
unsigned name_id;
int forward_weight;
int reverse_weight;
int forward_offset;
int reverse_offset;
unsigned packed_geometry_id;
FixedPointCoordinate location;
unsigned short fwd_segment_position;
int GetForwardWeightPlusOffset() const
{
if (SPECIAL_NODEID == forward_node_id)
{
return 0;
}
const int result = (forward_offset + forward_weight);
return result;
}
int GetReverseWeightPlusOffset() const
{
if (SPECIAL_NODEID == reverse_node_id)
{
return 0;
}
const int result = (reverse_offset + reverse_weight);
return result;
}
void Reset()
{
forward_node_id = SPECIAL_NODEID;
name_id = SPECIAL_NODEID;
forward_weight = INVALID_EDGE_WEIGHT;
reverse_weight = INVALID_EDGE_WEIGHT;
forward_offset = 0;
reverse_offset = 0;
location.Reset();
}
bool isBidirected() const
{
return (forward_node_id != SPECIAL_NODEID) &&
(reverse_node_id != SPECIAL_NODEID);
}
bool IsCompressed() const
{
return (forward_offset != 0) || (reverse_offset != 0);
}
bool isValid(const unsigned numberOfNodes) const
{
return location.isValid() &&
((forward_node_id < numberOfNodes) || (reverse_node_id < numberOfNodes)) &&
((forward_weight != INVALID_EDGE_WEIGHT) || (reverse_weight != INVALID_EDGE_WEIGHT)) &&
(name_id != std::numeric_limits<unsigned>::max());
}
bool isValid() const
{
return location.isValid() &&
(name_id != std::numeric_limits<unsigned>::max());
}
bool operator==(const PhantomNode & other) const
{
return location == other.location;
}
};
struct PhantomNodes
{
PhantomNode source_phantom;
PhantomNode target_phantom;
void Reset()
{
source_phantom.Reset();
target_phantom.Reset();
}
bool PhantomsAreOnSameNodeBasedEdge() const
{
return (source_phantom.forward_node_id == target_phantom.forward_node_id);
}
bool AtLeastOnePhantomNodeIsInvalid() const
{
return ((source_phantom.forward_node_id == SPECIAL_NODEID) && (source_phantom.reverse_node_id == SPECIAL_NODEID)) ||
((target_phantom.forward_node_id == SPECIAL_NODEID) && (target_phantom.reverse_node_id == SPECIAL_NODEID));
}
bool PhantomNodesHaveEqualLocation() const
{
return source_phantom == target_phantom;
}
};
inline std::ostream& operator<<(std::ostream &out, const PhantomNodes & pn)
{
out << "source_coord: " << pn.source_phantom.location << "\n";
out << "target_coord: " << pn.target_phantom.location << std::endl;
return out;
}
inline std::ostream& operator<<(std::ostream &out, const PhantomNode & pn)
{
out << "node1: " << pn.forward_node_id << ", " <<
"node2: " << pn.reverse_node_id << ", " <<
"name: " << pn.name_id << ", " <<
"fwd-w: " << pn.forward_weight << ", " <<
"rev-w: " << pn.reverse_weight << ", " <<
"fwd-o: " << pn.forward_offset << ", " <<
"rev-o: " << pn.reverse_offset << ", " <<
"geom: " << pn.packed_geometry_id << ", " <<
"pos: " << pn.fwd_segment_position << ", " <<
"loc: " << pn.location;
return out;
}
#endif // PHANTOM_NODES_H
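// Behaviour sketch (illustration only) for a phantom node that was only matched in the
// forward direction; the numbers are made up and the include path is an assumption.
#include "../DataStructures/PhantomNodes.h"

inline int forward_only_example()
{
    PhantomNode phantom;
    phantom.forward_node_id = 42; // matched forward edge-based node
    phantom.forward_weight = 100; // weight of the remaining part of the segment
    phantom.forward_offset = 25;  // prefix sum up to the segment
    // reverse_node_id stays SPECIAL_NODEID, so the reverse helper contributes nothing
    const int forward = phantom.GetForwardWeightPlusOffset(); // 125
    const int reverse = phantom.GetReverseWeightPlusOffset(); // 0
    return phantom.isBidirected() ? -1 : forward + reverse;   // 125, not bidirected
}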

View File

@ -0,0 +1,64 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef QUERYEDGE_H_
#define QUERYEDGE_H_
#include "../typedefs.h"
struct QueryEdge
{
NodeID source;
NodeID target;
struct EdgeData
{
NodeID id : 31;
bool shortcut : 1;
int distance : 30;
bool forward : 1;
bool backward : 1;
} data;
bool operator<(const QueryEdge &right) const
{
if (source != right.source)
{
return source < right.source;
}
return target < right.target;
}
bool operator==(const QueryEdge &right) const
{
return (source == right.source && target == right.target &&
data.distance == right.data.distance && data.shortcut == right.data.shortcut &&
data.forward == right.data.forward && data.backward == right.data.backward &&
data.id == right.data.id);
}
};
#endif /* QUERYEDGE_H_ */
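// Sketch (illustration only): the ordering and equality operators above let plain standard
// algorithms build deduplicated, adjacency-grouped edge lists. Note the bit-packed payload;
// 'distance' is a 30-bit signed field, so weights must stay well below 2^29. The include
// path is an assumption about the tree layout.
#include "../DataStructures/QueryEdge.h"
#include <algorithm>
#include <vector>

inline void sort_and_unique(std::vector<QueryEdge> &edge_list)
{
    std::sort(edge_list.begin(), edge_list.end()); // orders by source, then target
    edge_list.erase(std::unique(edge_list.begin(), edge_list.end()), edge_list.end());
}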

View File

@ -0,0 +1,85 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef QUERY_NODE_H
#define QUERY_NODE_H
#include "../typedefs.h"
#include <osrm/Coordinate.h>
#include <boost/assert.hpp>
#include <limits>
struct NodeInfo
{
typedef NodeID key_type; // type of NodeID
typedef int value_type; // type of lat,lons
NodeInfo(int lat, int lon, NodeID id) : lat(lat), lon(lon), id(id) {}
NodeInfo()
: lat(std::numeric_limits<int>::max()), lon(std::numeric_limits<int>::max()),
id(std::numeric_limits<unsigned>::max())
{
}
int lat;
int lon;
NodeID id;
static NodeInfo min_value()
{
return NodeInfo(-90 * COORDINATE_PRECISION,
-180 * COORDINATE_PRECISION,
std::numeric_limits<NodeID>::min());
}
static NodeInfo max_value()
{
return NodeInfo(90 * COORDINATE_PRECISION,
180 * COORDINATE_PRECISION,
std::numeric_limits<NodeID>::max());
}
value_type operator[](const std::size_t n) const
{
switch (n)
{
case 1:
return lat;
case 0:
return lon;
default:
break;
}
BOOST_ASSERT_MSG(false, "should not happen");
return std::numeric_limits<unsigned>::max();
}
};
#endif // QUERY_NODE_H

View File

@ -0,0 +1,84 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef RAW_ROUTE_DATA_H
#define RAW_ROUTE_DATA_H
#include "../DataStructures/PhantomNodes.h"
#include "../DataStructures/TurnInstructions.h"
#include "../typedefs.h"
#include <osrm/Coordinate.h>
#include <limits>
#include <vector>
struct PathData
{
PathData()
: node(std::numeric_limits<unsigned>::max()), name_id(std::numeric_limits<unsigned>::max()),
segment_duration(std::numeric_limits<unsigned>::max()),
turn_instruction(TurnInstruction::NoTurn)
{
}
PathData(NodeID node, unsigned name_id, TurnInstruction turn_instruction, unsigned segment_duration)
: node(node), name_id(name_id), segment_duration(segment_duration), turn_instruction(turn_instruction)
{
}
NodeID node;
unsigned name_id;
unsigned segment_duration;
TurnInstruction turn_instruction;
};
struct RawRouteData
{
std::vector<std::vector<PathData>> unpacked_path_segments;
std::vector<PathData> unpacked_alternative;
std::vector<PhantomNodes> segment_end_coordinates;
std::vector<FixedPointCoordinate> raw_via_node_coordinates;
unsigned check_sum;
int shortest_path_length;
int alternative_path_length;
bool source_traversed_in_reverse;
bool target_traversed_in_reverse;
bool alt_source_traversed_in_reverse;
bool alt_target_traversed_in_reverse;
RawRouteData()
: check_sum(std::numeric_limits<unsigned>::max()),
shortest_path_length(std::numeric_limits<int>::max()),
alternative_path_length(std::numeric_limits<int>::max()),
source_traversed_in_reverse(false), target_traversed_in_reverse(false),
alt_source_traversed_in_reverse(false), alt_target_traversed_in_reverse(false)
{
}
};
#endif // RAW_ROUTE_DATA_H
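
// Hypothetical usage sketch (illustration only, based on the definitions
// above): a routing algorithm appends one PathData entry per traversed node of
// an unpacked segment; the ids and durations below are invented.
inline RawRouteData MakeExampleRoute()
{
    RawRouteData raw_route; // all fields start out as "invalid" sentinels
    std::vector<PathData> segment;
    segment.emplace_back(/*node*/ 1u, /*name_id*/ 7u,
                         TurnInstruction::NoTurn, /*segment_duration*/ 120u);
    raw_route.unpacked_path_segments.push_back(segment);
    raw_route.shortest_path_length = 120; // overwrite the "no route found" sentinel
    return raw_route;
}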

View File

@ -0,0 +1,126 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef RESTRICTION_H
#define RESTRICTION_H
#include "../typedefs.h"
#include <limits>
struct TurnRestriction
{
NodeID viaNode;
NodeID fromNode;
NodeID toNode;
struct Bits
{ // mostly unused
Bits()
: isOnly(false), unused1(false), unused2(false), unused3(false), unused4(false),
unused5(false), unused6(false), unused7(false)
{
}
bool isOnly : 1;
bool unused1 : 1;
bool unused2 : 1;
bool unused3 : 1;
bool unused4 : 1;
bool unused5 : 1;
bool unused6 : 1;
bool unused7 : 1;
} flags;
explicit TurnRestriction(NodeID viaNode)
: viaNode(viaNode), fromNode(std::numeric_limits<unsigned>::max()),
toNode(std::numeric_limits<unsigned>::max())
{
}
explicit TurnRestriction(const bool isOnly = false)
: viaNode(std::numeric_limits<unsigned>::max()),
fromNode(std::numeric_limits<unsigned>::max()),
toNode(std::numeric_limits<unsigned>::max())
{
flags.isOnly = isOnly;
}
};
struct InputRestrictionContainer
{
EdgeID fromWay;
EdgeID toWay;
unsigned viaNode;
TurnRestriction restriction;
InputRestrictionContainer(EdgeID fromWay, EdgeID toWay, NodeID vn, unsigned vw)
: fromWay(fromWay), toWay(toWay), viaNode(vw)
{
restriction.viaNode = vn;
}
explicit InputRestrictionContainer(bool isOnly = false)
: fromWay(std::numeric_limits<unsigned>::max()),
toWay(std::numeric_limits<unsigned>::max()), viaNode(std::numeric_limits<unsigned>::max())
{
restriction.flags.isOnly = isOnly;
}
static InputRestrictionContainer min_value() { return InputRestrictionContainer(0, 0, 0, 0); }
static InputRestrictionContainer max_value()
{
return InputRestrictionContainer(std::numeric_limits<unsigned>::max(),
std::numeric_limits<unsigned>::max(),
std::numeric_limits<unsigned>::max(),
std::numeric_limits<unsigned>::max());
}
};
struct CmpRestrictionContainerByFrom
{
typedef InputRestrictionContainer value_type;
inline bool operator()(const InputRestrictionContainer &a, const InputRestrictionContainer &b)
const
{
return a.fromWay < b.fromWay;
}
inline value_type max_value() const { return InputRestrictionContainer::max_value(); }
inline value_type min_value() const { return InputRestrictionContainer::min_value(); }
};
struct CmpRestrictionContainerByTo
{
typedef InputRestrictionContainer value_type;
inline bool operator()(const InputRestrictionContainer &a, const InputRestrictionContainer &b)
const
{
return a.toWay < b.toWay;
}
value_type max_value() const { return InputRestrictionContainer::max_value(); }
value_type min_value() const { return InputRestrictionContainer::min_value(); }
};
#endif // RESTRICTION_H
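
// Hypothetical usage sketch (illustration only, based on the definitions
// above): parsed restrictions are collected as InputRestrictionContainer and
// sorted by their starting way so they can be matched against ways in one
// sweep; the way/node ids are invented.
#include <algorithm>
#include <vector>

inline void SortRestrictionsExample()
{
    std::vector<InputRestrictionContainer> restrictions;
    restrictions.emplace_back(/*fromWay*/ 20u, /*toWay*/ 30u, /*via node*/ 5u, /*via way*/ 0u);
    restrictions.emplace_back(/*fromWay*/ 10u, /*toWay*/ 40u, /*via node*/ 6u, /*via way*/ 0u);
    restrictions.back().restriction.flags.isOnly = true; // an only_*-restriction

    // order by starting way id, as the comparators above are meant to be used
    std::sort(restrictions.begin(), restrictions.end(), CmpRestrictionContainerByFrom());
}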

View File

@ -0,0 +1,215 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "RestrictionMap.h"
#include "NodeBasedGraph.h"
#include "../Util/SimpleLogger.h"
bool RestrictionMap::IsNodeAViaNode(const NodeID node) const
{
return m_no_turn_via_node_set.find(node) != m_no_turn_via_node_set.end();
}
RestrictionMap::RestrictionMap(const std::shared_ptr<NodeBasedDynamicGraph> &graph,
const std::vector<TurnRestriction> &input_restrictions_list)
: m_count(0), m_graph(graph)
{
// decompose each restriction, consisting of a start, via and end node, into
// a starting edge plus a list of all end nodes
for (auto &restriction : input_restrictions_list)
{
m_restriction_start_nodes.insert(restriction.fromNode);
m_no_turn_via_node_set.insert(restriction.viaNode);
std::pair<NodeID, NodeID> restriction_source = {restriction.fromNode, restriction.viaNode};
unsigned index;
auto restriction_iter = m_restriction_map.find(restriction_source);
if (restriction_iter == m_restriction_map.end())
{
index = m_restriction_bucket_list.size();
m_restriction_bucket_list.resize(index + 1);
m_restriction_map.emplace(restriction_source, index);
}
else
{
index = restriction_iter->second;
// Map already contains an is_only_*-restriction
if (m_restriction_bucket_list.at(index).begin()->second)
{
continue;
}
else if (restriction.flags.isOnly)
{
// We are going to insert an is_only_*-restriction. There can be only one.
m_count -= m_restriction_bucket_list.at(index).size();
m_restriction_bucket_list.at(index).clear();
}
}
++m_count;
m_restriction_bucket_list.at(index)
.emplace_back(restriction.toNode, restriction.flags.isOnly);
}
}
// Replace end v with w in each turn restriction containing u as via node
void RestrictionMap::FixupArrivingTurnRestriction(const NodeID u, const NodeID v, const NodeID w)
{
BOOST_ASSERT(u != SPECIAL_NODEID);
BOOST_ASSERT(v != SPECIAL_NODEID);
BOOST_ASSERT(w != SPECIAL_NODEID);
if (!RestrictionStartsAtNode(u))
{
return;
}
// find all potential start edges
// it is more efficient to get a (small) list of potential start edges than to iterate over all buckets
std::vector<NodeID> predecessors;
for (const EdgeID current_edge_id : m_graph->GetAdjacentEdgeRange(u))
{
const EdgeData &edge_data = m_graph->GetEdgeData(current_edge_id);
const NodeID target = m_graph->GetTarget(current_edge_id);
if (edge_data.backward && (v != target))
{
predecessors.push_back(target);
}
}
for (const NodeID x : predecessors)
{
auto restriction_iterator = m_restriction_map.find({x, u});
if (restriction_iterator == m_restriction_map.end())
{
continue;
}
const unsigned index = restriction_iterator->second;
auto &bucket = m_restriction_bucket_list.at(index);
for (RestrictionTarget &restriction_target : bucket)
{
if (v == restriction_target.first)
{
restriction_target.first = w;
}
}
}
}
// Replaces start edge (v, w) with (u, w). Only start node changes.
void RestrictionMap::FixupStartingTurnRestriction(const NodeID u, const NodeID v, const NodeID w)
{
BOOST_ASSERT(u != SPECIAL_NODEID);
BOOST_ASSERT(v != SPECIAL_NODEID);
BOOST_ASSERT(w != SPECIAL_NODEID);
if (!RestrictionStartsAtNode(u))
{
return;
}
const auto restriction_iterator = m_restriction_map.find({v, w});
if (restriction_iterator != m_restriction_map.end())
{
const unsigned index = restriction_iterator->second;
// remove old restriction start (v,w)
m_restriction_map.erase(restriction_iterator);
// insert new restriction start (u,w) (point to index)
RestrictionSource new_source = {u, w};
m_restriction_map.emplace(new_source, index);
}
}
// Check if edge (u, v) is the start of any turn restriction.
// If so, returns the id of the first target node.
NodeID RestrictionMap::CheckForEmanatingIsOnlyTurn(const NodeID u, const NodeID v) const
{
BOOST_ASSERT(u != SPECIAL_NODEID);
BOOST_ASSERT(v != SPECIAL_NODEID);
if (!RestrictionStartsAtNode(u))
{
return SPECIAL_NODEID;
}
auto restriction_iter = m_restriction_map.find({u, v});
if (restriction_iter != m_restriction_map.end())
{
const unsigned index = restriction_iter->second;
auto &bucket = m_restriction_bucket_list.at(index);
for (const RestrictionSource &restriction_target : bucket)
{
if (restriction_target.second)
{
return restriction_target.first;
}
}
}
return SPECIAL_NODEID;
}
// Checks whether the turn <u,v,w> is forbidden by a no_*-turn restriction.
bool RestrictionMap::CheckIfTurnIsRestricted(const NodeID u, const NodeID v, const NodeID w) const
{
BOOST_ASSERT(u != SPECIAL_NODEID);
BOOST_ASSERT(v != SPECIAL_NODEID);
BOOST_ASSERT(w != SPECIAL_NODEID);
if (!RestrictionStartsAtNode(u))
{
return false;
}
auto restriction_iter = m_restriction_map.find({u, v});
if (restriction_iter != m_restriction_map.end())
{
const unsigned index = restriction_iter->second;
const auto &bucket = m_restriction_bucket_list.at(index);
for (const RestrictionTarget &restriction_target : bucket)
{
if ((w == restriction_target.first) && // target found
(!restriction_target.second)) // and not an only_-restr.
{
return true;
}
}
}
return false;
}
// check whether a node is the start of any restriction
bool RestrictionMap::RestrictionStartsAtNode(const NodeID node) const
{
    return m_restriction_start_nodes.find(node) != m_restriction_start_nodes.end();
}

View File

@ -0,0 +1,74 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __RESTRICTION_MAP_H__
#define __RESTRICTION_MAP_H__
#include <memory>
#include "DynamicGraph.h"
#include "Restriction.h"
#include "NodeBasedGraph.h"
#include "../Util/StdHashExtensions.h"
#include "../typedefs.h"
#include <unordered_map>
#include <unordered_set>
// Efficient lookup of whether an edge is the (start, via) pair of a TurnRestriction.
// The EdgeBasedEdgeFactory uses this to decide whether edges are inserted or their geometry is compressed.
class RestrictionMap
{
public:
RestrictionMap(const std::shared_ptr<NodeBasedDynamicGraph> &graph,
const std::vector<TurnRestriction> &input_restrictions_list);
void FixupArrivingTurnRestriction(const NodeID u, const NodeID v, const NodeID w);
void FixupStartingTurnRestriction(const NodeID u, const NodeID v, const NodeID w);
NodeID CheckForEmanatingIsOnlyTurn(const NodeID u, const NodeID v) const;
bool CheckIfTurnIsRestricted(const NodeID u, const NodeID v, const NodeID w) const;
bool IsNodeAViaNode(const NodeID node) const;
unsigned size() { return m_count; }
private:
bool RestrictionStartsAtNode(const NodeID node) const;
typedef std::pair<NodeID, NodeID> RestrictionSource;
typedef std::pair<NodeID, bool> RestrictionTarget;
typedef std::vector<RestrictionTarget> EmanatingRestrictionsVector;
typedef NodeBasedDynamicGraph::EdgeData EdgeData;
unsigned m_count;
std::shared_ptr<NodeBasedDynamicGraph> m_graph;
//! index -> list of (target, isOnly)
std::vector<EmanatingRestrictionsVector> m_restriction_bucket_list;
//! maps (start, via) -> bucket index
std::unordered_map<RestrictionSource, unsigned> m_restriction_map;
std::unordered_set<NodeID> m_restriction_start_nodes;
std::unordered_set<NodeID> m_no_turn_via_node_set;
};
#endif
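
// Hypothetical usage sketch (illustration only, based on the declarations
// above): the edge-expanded-graph builder asks the map whether a candidate
// turn <u, v, w> may be taken; graph, restrictions and ids are placeholders.
inline bool MayTurn(const std::shared_ptr<NodeBasedDynamicGraph> &graph,
                    const std::vector<TurnRestriction> &restrictions,
                    const NodeID u, const NodeID v, const NodeID w)
{
    const RestrictionMap restriction_map(graph, restrictions);

    // an only_*-restriction starting at (u, v) forces a single exit node ...
    const NodeID forced_exit = restriction_map.CheckForEmanatingIsOnlyTurn(u, v);
    if (forced_exit != SPECIAL_NODEID)
    {
        return forced_exit == w;
    }
    // ... otherwise the turn is allowed unless a no_*-restriction forbids it
    return !restriction_map.CheckIfTurnIsRestricted(u, v, w);
}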

View File

@ -0,0 +1,82 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <osrm/RouteParameters.h>
#include <boost/fusion/container/vector.hpp>
#include <boost/fusion/sequence/intrinsic.hpp>
#include <boost/fusion/include/at_c.hpp>
RouteParameters::RouteParameters()
: zoom_level(18), print_instructions(false), alternate_route(true), geometry(true),
compression(true), deprecatedAPI(false), check_sum(-1)
{
}
void RouteParameters::setZoomLevel(const short level)
{
if (18 >= level && 0 <= level)
{
zoom_level = level;
}
}
void RouteParameters::setAlternateRouteFlag(const bool flag) { alternate_route = flag; }
void RouteParameters::setDeprecatedAPIFlag(const std::string &) { deprecatedAPI = true; }
void RouteParameters::setChecksum(const unsigned sum) { check_sum = sum; }
void RouteParameters::setInstructionFlag(const bool flag) { print_instructions = flag; }
void RouteParameters::setService(const std::string &service_string) { service = service_string; }
void RouteParameters::setOutputFormat(const std::string &format) { output_format = format; }
void RouteParameters::setJSONpParameter(const std::string &parameter) { jsonp_parameter = parameter; }
void RouteParameters::addHint(const std::string &hint)
{
hints.resize(coordinates.size());
if (!hints.empty())
{
hints.back() = hint;
}
}
void RouteParameters::setLanguage(const std::string &language_string) { language = language_string; }
void RouteParameters::setGeometryFlag(const bool flag) { geometry = flag; }
void RouteParameters::setCompressionFlag(const bool flag) { compression = flag; }
void RouteParameters::addCoordinate(const boost::fusion::vector<double, double> &transmitted_coordinates)
{
const int lat = COORDINATE_PRECISION * boost::fusion::at_c<0>(transmitted_coordinates);
const int lon = COORDINATE_PRECISION * boost::fusion::at_c<1>(transmitted_coordinates);
coordinates.emplace_back(lat, lon);
}
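
// Hypothetical usage sketch (illustration only, based on the definitions
// above): the HTTP request parser fills a RouteParameters object; coordinates
// arrive as (lat, lon) pairs in degrees and are converted to fixed-point by
// addCoordinate(). The service name and coordinates below are invented.
inline RouteParameters MakeExampleQuery()
{
    RouteParameters params;
    params.setService("viaroute");
    params.setZoomLevel(13); // values outside [0, 18] are ignored
    params.setInstructionFlag(true);
    params.addCoordinate(boost::fusion::vector<double, double>(52.517037, 13.388860));
    params.addCoordinate(boost::fusion::vector<double, double>(52.529407, 13.397634));
    return params;
}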

View File

@ -0,0 +1,60 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SEARCHENGINE_H
#define SEARCHENGINE_H
#include "SearchEngineData.h"
#include "../RoutingAlgorithms/AlternativePathRouting.h"
#include "../RoutingAlgorithms/ManyToManyRouting.h"
#include "../RoutingAlgorithms/ShortestPathRouting.h"
#include <type_traits>
template <class DataFacadeT> class SearchEngine
{
private:
DataFacadeT *facade;
SearchEngineData engine_working_data;
public:
ShortestPathRouting<DataFacadeT> shortest_path;
AlternativeRouting<DataFacadeT> alternative_path;
ManyToManyRouting<DataFacadeT> distance_table;
explicit SearchEngine(DataFacadeT *facade)
: facade(facade), shortest_path(facade, engine_working_data),
alternative_path(facade, engine_working_data), distance_table(facade, engine_working_data)
{
static_assert(!std::is_pointer<DataFacadeT>(), "don't instantiate with ptr type");
static_assert(std::is_object<DataFacadeT>(), "don't instantiate with void, function, or reference");
}
~SearchEngine() {}
};
#endif // SEARCHENGINE_H
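
// Hypothetical usage sketch (illustration only, based on the definitions
// above): a plugin owns one SearchEngine templated on its concrete data
// facade; "DataFacadeT" here stands in for such a facade type.
template <class DataFacadeT> void ConstructSearchEngineExample(DataFacadeT *facade)
{
    SearchEngine<DataFacadeT> search_engine(facade);
    // shortest_path, alternative_path and distance_table share the facade and
    // the thread-local heaps of the embedded SearchEngineData
    (void)search_engine;
}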

View File

@ -0,0 +1,91 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "SearchEngineData.h"
void SearchEngineData::InitializeOrClearFirstThreadLocalStorage(const unsigned number_of_nodes)
{
if (forwardHeap.get())
{
forwardHeap->Clear();
}
else
{
forwardHeap.reset(new QueryHeap(number_of_nodes));
}
if (backwardHeap.get())
{
backwardHeap->Clear();
}
else
{
backwardHeap.reset(new QueryHeap(number_of_nodes));
}
}
void SearchEngineData::InitializeOrClearSecondThreadLocalStorage(const unsigned number_of_nodes)
{
if (forwardHeap2.get())
{
forwardHeap2->Clear();
}
else
{
forwardHeap2.reset(new QueryHeap(number_of_nodes));
}
if (backwardHeap2.get())
{
backwardHeap2->Clear();
}
else
{
backwardHeap2.reset(new QueryHeap(number_of_nodes));
}
}
void SearchEngineData::InitializeOrClearThirdThreadLocalStorage(const unsigned number_of_nodes)
{
if (forwardHeap3.get())
{
forwardHeap3->Clear();
}
else
{
forwardHeap3.reset(new QueryHeap(number_of_nodes));
}
if (backwardHeap3.get())
{
backwardHeap3->Clear();
}
else
{
backwardHeap3.reset(new QueryHeap(number_of_nodes));
}
}

View File

@ -0,0 +1,66 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SEARCH_ENGINE_DATA_H
#define SEARCH_ENGINE_DATA_H
#include "BinaryHeap.h"
#include "QueryEdge.h"
#include "StaticGraph.h"
#include "../typedefs.h"
#include <boost/thread.hpp>
#include <vector>
struct HeapData
{
NodeID parent;
/* explicit */ HeapData(NodeID p) : parent(p) {}
};
struct SearchEngineData
{
typedef BinaryHeap<NodeID, NodeID, int, HeapData, UnorderedMapStorage<NodeID, int>> QueryHeap;
typedef boost::thread_specific_ptr<QueryHeap> SearchEngineHeapPtr;
static SearchEngineHeapPtr forwardHeap;
static SearchEngineHeapPtr backwardHeap;
static SearchEngineHeapPtr forwardHeap2;
static SearchEngineHeapPtr backwardHeap2;
static SearchEngineHeapPtr forwardHeap3;
static SearchEngineHeapPtr backwardHeap3;
void InitializeOrClearFirstThreadLocalStorage(const unsigned number_of_nodes);
void InitializeOrClearSecondThreadLocalStorage(const unsigned number_of_nodes);
void InitializeOrClearThirdThreadLocalStorage(const unsigned number_of_nodes);
};
#endif // SEARCH_ENGINE_DATA_H
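
// Hypothetical usage sketch (illustration only): a query first resets the
// thread-local heaps for the current graph size, then seeds the forward heap
// with its origin. The Insert(node, weight, parent) call assumes the
// BinaryHeap interface included above; ids and weights are invented.
inline void SeedForwardSearch(SearchEngineData &engine_working_data,
                              const unsigned number_of_nodes,
                              const NodeID source_node)
{
    engine_working_data.InitializeOrClearFirstThreadLocalStorage(number_of_nodes);
    SearchEngineData::QueryHeap &forward_heap = *engine_working_data.forwardHeap;
    forward_heap.Insert(source_node, 0, source_node); // HeapData records the parent
}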

View File

@ -0,0 +1,70 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SEGMENT_INFORMATION_H
#define SEGMENT_INFORMATION_H
#include "TurnInstructions.h"
#include "../typedefs.h"
#include <osrm/Coordinate.h>
// Struct fits everything in one cache line
struct SegmentInformation
{
FixedPointCoordinate location;
NodeID name_id;
unsigned duration;
double length;
short bearing; // more than enough [0..3600] fits into 12 bits
TurnInstruction turn_instruction;
bool necessary;
explicit SegmentInformation(const FixedPointCoordinate &location,
const NodeID name_id,
const unsigned duration,
const double length,
const TurnInstruction turn_instruction,
const bool necessary)
: location(location), name_id(name_id), duration(duration), length(length), bearing(0),
turn_instruction(turn_instruction), necessary(necessary)
{
}
explicit SegmentInformation(const FixedPointCoordinate &location,
const NodeID name_id,
const unsigned duration,
const double length,
const TurnInstruction turn_instruction)
: location(location), name_id(name_id), duration(duration), length(length), bearing(0),
turn_instruction(turn_instruction), necessary(turn_instruction != TurnInstruction::NoTurn)
{
}
};
#endif /* SEGMENT_INFORMATION_H */
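
// Hypothetical usage sketch (illustration only, based on the definitions
// above): the description stage collects one SegmentInformation per route
// point; the shorter constructor derives "necessary" from the turn
// instruction. Coordinates, ids and lengths are invented.
#include <vector>

inline void CollectSegmentExample(std::vector<SegmentInformation> &segments)
{
    const FixedPointCoordinate location(static_cast<int>(52.5 * COORDINATE_PRECISION),
                                        static_cast<int>(13.4 * COORDINATE_PRECISION));
    // NoTurn means the shorter constructor marks this segment as not necessary
    segments.emplace_back(location, /*name_id*/ 3u, /*duration*/ 42u,
                          /*length*/ 12.5, TurnInstruction::NoTurn);
}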

View File

@ -0,0 +1,233 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SHARED_MEMORY_FACTORY_H
#define SHARED_MEMORY_FACTORY_H
#include "../Util/OSRMException.h"
#include "../Util/SimpleLogger.h"
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/interprocess/mapped_region.hpp>
#include <boost/interprocess/xsi_shared_memory.hpp>
#ifdef __linux__
#include <sys/ipc.h>
#include <sys/shm.h>
#endif
#include <climits>
#include <cstdint>
#include <algorithm>
#include <exception>
struct OSRMLockFile
{
boost::filesystem::path operator()()
{
boost::filesystem::path temp_dir = boost::filesystem::temp_directory_path();
boost::filesystem::path lock_file = temp_dir / "osrm.lock";
return lock_file;
}
};
class SharedMemory
{
// Remove shared memory on destruction
class shm_remove
{
private:
int m_shmid;
bool m_initialized;
public:
void SetID(int shmid)
{
m_shmid = shmid;
m_initialized = true;
}
shm_remove() : m_shmid(INT_MIN), m_initialized(false) {}
shm_remove(const shm_remove &) = delete;
~shm_remove()
{
if (m_initialized)
{
SimpleLogger().Write(logDEBUG) << "automatic memory deallocation";
if (!boost::interprocess::xsi_shared_memory::remove(m_shmid))
{
SimpleLogger().Write(logDEBUG) << "could not deallocate id " << m_shmid;
}
}
}
};
public:
void *Ptr() const { return region.get_address(); }
SharedMemory() = delete;
SharedMemory(const SharedMemory &) = delete;
template <typename IdentifierT>
SharedMemory(const boost::filesystem::path &lock_file,
const IdentifierT id,
const uint64_t size = 0,
bool read_write = false,
bool remove_prev = true)
: key(lock_file.string().c_str(), id)
{
if (0 == size)
{ // read_only
shm = boost::interprocess::xsi_shared_memory(boost::interprocess::open_only, key);
region = boost::interprocess::mapped_region(
shm,
(read_write ? boost::interprocess::read_write : boost::interprocess::read_only));
}
else
{ // writeable pointer
// remove previously allocated mem
if (remove_prev)
{
Remove(key);
}
shm = boost::interprocess::xsi_shared_memory(
boost::interprocess::open_or_create, key, size);
#ifdef __linux__
if (-1 == shmctl(shm.get_shmid(), SHM_LOCK, 0))
{
if (ENOMEM == errno)
{
SimpleLogger().Write(logWARNING) << "could not lock shared memory to RAM";
}
}
#endif
region = boost::interprocess::mapped_region(shm, boost::interprocess::read_write);
remover.SetID(shm.get_shmid());
SimpleLogger().Write(logDEBUG) << "writeable memory allocated " << size << " bytes";
}
}
template <typename IdentifierT> static bool RegionExists(const IdentifierT id)
{
bool result = true;
try
{
OSRMLockFile lock_file;
boost::interprocess::xsi_key key(lock_file().string().c_str(), id);
result = RegionExists(key);
}
catch (...) { result = false; }
return result;
}
template <typename IdentifierT> static bool Remove(const IdentifierT id)
{
OSRMLockFile lock_file;
boost::interprocess::xsi_key key(lock_file().string().c_str(), id);
return Remove(key);
}
private:
static bool RegionExists(const boost::interprocess::xsi_key &key)
{
bool result = true;
try { boost::interprocess::xsi_shared_memory shm(boost::interprocess::open_only, key); }
catch (...) { result = false; }
return result;
}
static bool Remove(const boost::interprocess::xsi_key &key)
{
bool ret = false;
try
{
SimpleLogger().Write(logDEBUG) << "deallocating prev memory";
boost::interprocess::xsi_shared_memory xsi(boost::interprocess::open_only, key);
ret = boost::interprocess::xsi_shared_memory::remove(xsi.get_shmid());
}
catch (const boost::interprocess::interprocess_exception &e)
{
if (e.get_error_code() != boost::interprocess::not_found_error)
{
throw;
}
}
return ret;
}
boost::interprocess::xsi_key key;
boost::interprocess::xsi_shared_memory shm;
boost::interprocess::mapped_region region;
shm_remove remover;
};
template <class LockFileT = OSRMLockFile> class SharedMemoryFactory_tmpl
{
public:
template <typename IdentifierT>
static SharedMemory *Get(const IdentifierT &id,
const uint64_t size = 0,
bool read_write = false,
bool remove_prev = true)
{
try
{
LockFileT lock_file;
if (!boost::filesystem::exists(lock_file()))
{
if (0 == size)
{
throw OSRMException("lock file does not exist, exiting");
}
else
{
boost::filesystem::ofstream ofs(lock_file());
ofs.close();
}
}
return new SharedMemory(lock_file(), id, size, read_write, remove_prev);
}
catch (const boost::interprocess::interprocess_exception &e)
{
SimpleLogger().Write(logWARNING) << "caught exception: " << e.what() << ", code "
<< e.get_error_code();
throw OSRMException(e.what());
}
}
SharedMemoryFactory_tmpl() = delete;
SharedMemoryFactory_tmpl(const SharedMemoryFactory_tmpl &) = delete;
};
typedef SharedMemoryFactory_tmpl<> SharedMemoryFactory;
#endif /* SHARED_MEMORY_POINTER_FACTORY_H */
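
// Hypothetical usage sketch (illustration only, based on the definitions
// above): one process allocates a writable block under an identifier, another
// attaches to it read-only by passing size 0. The identifier value is made up;
// the real code uses its shared-data region ids.
inline void SharedMemoryExample()
{
    const int region_id = 42;
    // writer: create (and try to mlock) a 1 MiB region
    SharedMemory *writable = SharedMemoryFactory::Get(region_id, 1024 * 1024);
    // reader: size 0 attaches to the existing region via open_only
    SharedMemory *read_only = SharedMemoryFactory::Get(region_id);
    // ... exchange data through writable->Ptr() / read_only->Ptr() ...
    delete read_only;
    delete writable; // shm_remove deallocates the segment here
}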

View File

@ -0,0 +1,156 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SHARED_MEMORY_VECTOR_WRAPPER_H
#define SHARED_MEMORY_VECTOR_WRAPPER_H
#include "../Util/SimpleLogger.h"
#include <boost/assert.hpp>
#include <algorithm>
#include <iterator>
#include <type_traits>
#include <vector>
template <typename DataT> class ShMemIterator : public std::iterator<std::input_iterator_tag, DataT>
{
DataT *p;
public:
explicit ShMemIterator(DataT *x) : p(x) {}
ShMemIterator(const ShMemIterator &mit) : p(mit.p) {}
ShMemIterator &operator++()
{
++p;
return *this;
}
ShMemIterator operator++(int)
{
ShMemIterator tmp(*this);
operator++();
return tmp;
}
ShMemIterator operator+(std::ptrdiff_t diff)
{
ShMemIterator tmp(p + diff);
return tmp;
}
bool operator==(const ShMemIterator &rhs) { return p == rhs.p; }
bool operator!=(const ShMemIterator &rhs) { return p != rhs.p; }
DataT &operator*() { return *p; }
};
template <typename DataT> class SharedMemoryWrapper
{
private:
DataT *m_ptr;
std::size_t m_size;
public:
SharedMemoryWrapper() : m_ptr(nullptr), m_size(0) {}
SharedMemoryWrapper(DataT *ptr, std::size_t size) : m_ptr(ptr), m_size(size) {}
void swap(SharedMemoryWrapper<DataT> &other)
{
BOOST_ASSERT_MSG(m_size != 0 || other.size() != 0, "size invalid");
std::swap(m_size, other.m_size);
std::swap(m_ptr, other.m_ptr);
}
DataT &at(const std::size_t index) { return m_ptr[index]; }
const DataT &at(const std::size_t index) const { return m_ptr[index]; }
ShMemIterator<DataT> begin() const { return ShMemIterator<DataT>(m_ptr); }
ShMemIterator<DataT> end() const { return ShMemIterator<DataT>(m_ptr + m_size); }
std::size_t size() const { return m_size; }
bool empty() const { return 0 == size(); }
DataT &operator[](const unsigned index)
{
BOOST_ASSERT_MSG(index < m_size, "invalid size");
return m_ptr[index];
}
const DataT &operator[](const unsigned index) const
{
BOOST_ASSERT_MSG(index < m_size, "invalid size");
return m_ptr[index];
}
};
template <> class SharedMemoryWrapper<bool>
{
private:
unsigned *m_ptr;
std::size_t m_size;
public:
SharedMemoryWrapper() : m_ptr(nullptr), m_size(0) {}
SharedMemoryWrapper(unsigned *ptr, std::size_t size) : m_ptr(ptr), m_size(size) {}
void swap(SharedMemoryWrapper<bool> &other)
{
BOOST_ASSERT_MSG(m_size != 0 || other.size() != 0, "size invalid");
std::swap(m_size, other.m_size);
std::swap(m_ptr, other.m_ptr);
}
bool at(const std::size_t index) const
{
const unsigned bucket = index / 32;
const unsigned offset = index % 32;
return m_ptr[bucket] & (1 << offset);
}
std::size_t size() const { return m_size; }
bool empty() const { return 0 == size(); }
bool operator[](const unsigned index)
{
BOOST_ASSERT_MSG(index < m_size, "invalid size");
const unsigned bucket = index / 32;
const unsigned offset = index % 32;
return m_ptr[bucket] & (1 << offset);
}
};
template <typename DataT, bool UseSharedMemory> struct ShM
{
typedef typename std::conditional<UseSharedMemory,
SharedMemoryWrapper<DataT>,
std::vector<DataT>>::type vector;
};
#endif // SHARED_MEMORY_VECTOR_WRAPPER_H
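
// Hypothetical usage sketch (illustration only, based on the definitions
// above): containers are written once against ShM<T, UseSharedMemory>::vector
// and compile either to a std::vector or to a non-owning view over a shared
// memory block; the buffer below is invented.
template <bool UseSharedMemory>
unsigned SumFirstTwo(typename ShM<unsigned, UseSharedMemory>::vector &v)
{
    return v[0] + v[1]; // identical element access in both instantiations
}

inline void ShMExample()
{
    ShM<unsigned, false>::vector plain_vector{3u, 4u}; // heap-backed flavour
    static unsigned buffer[2] = {3u, 4u};              // externally owned storage
    ShM<unsigned, true>::vector shm_view(buffer, 2);   // shared-memory flavour
    SumFirstTwo<false>(plain_vector);
    SumFirstTwo<true>(shm_view);
}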

View File

@ -0,0 +1,229 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef STATIC_GRAPH_H
#define STATIC_GRAPH_H
#include "../DataStructures/Percent.h"
#include "../DataStructures/SharedMemoryVectorWrapper.h"
#include "../Util/SimpleLogger.h"
#include "../typedefs.h"
#include <boost/assert.hpp>
#include <boost/range/irange.hpp>
#include <algorithm>
#include <cassert>
#include <limits>
#include <vector>
template <typename EdgeDataT, bool UseSharedMemory = false> class StaticGraph
{
public:
typedef decltype(boost::irange(0u,0u)) EdgeRange;
typedef NodeID NodeIterator;
typedef NodeID EdgeIterator;
typedef EdgeDataT EdgeData;
class InputEdge
{
public:
EdgeDataT data;
NodeIterator source;
NodeIterator target;
bool operator<(const InputEdge &right) const
{
if (source != right.source)
{
return source < right.source;
}
return target < right.target;
}
};
struct NodeArrayEntry
{
// index of the first edge
EdgeIterator first_edge;
};
struct EdgeArrayEntry
{
NodeID target;
EdgeDataT data;
};
EdgeRange GetAdjacentEdgeRange(const NodeID node) const
{
return boost::irange(BeginEdges(node), EndEdges(node));
}
StaticGraph(const int nodes, std::vector<InputEdge> &graph)
{
std::sort(graph.begin(), graph.end());
number_of_nodes = nodes;
number_of_edges = (EdgeIterator)graph.size();
node_array.resize(number_of_nodes + 1);
EdgeIterator edge = 0;
EdgeIterator position = 0;
for (NodeIterator node = 0; node <= number_of_nodes; ++node)
{
EdgeIterator last_edge = edge;
while (edge < number_of_edges && graph[edge].source == node)
{
++edge;
}
node_array[node].first_edge = position; // first outgoing edge of this node
position += edge - last_edge;           // advance by the node's out-degree
}
edge_array.resize(position);
edge = 0;
for (NodeIterator node = 0; node < number_of_nodes; ++node)
{
EdgeIterator e = node_array[node + 1].first_edge;
for (EdgeIterator i = node_array[node].first_edge; i != e; ++i)
{
edge_array[i].target = graph[edge].target;
edge_array[i].data = graph[edge].data;
assert(edge_array[i].data.distance > 0);
edge++;
}
}
}
StaticGraph(typename ShM<NodeArrayEntry, UseSharedMemory>::vector &nodes,
typename ShM<EdgeArrayEntry, UseSharedMemory>::vector &edges)
{
number_of_nodes = nodes.size() - 1;
number_of_edges = edges.size();
node_array.swap(nodes);
edge_array.swap(edges);
#ifndef NDEBUG
Percent p(GetNumberOfNodes());
for (unsigned u = 0; u < GetNumberOfNodes(); ++u)
{
for (auto eid : GetAdjacentEdgeRange(u))
{
const unsigned v = GetTarget(eid);
const EdgeData &data = GetEdgeData(eid);
if (data.shortcut)
{
const EdgeID first_edge_id = FindEdgeInEitherDirection(u, data.id);
if (SPECIAL_EDGEID == first_edge_id)
{
SimpleLogger().Write(logWARNING) << "cannot find first segment of edge ("
<< u << "," << data.id << "," << v
<< "), eid: " << eid;
BOOST_ASSERT(false);
}
const EdgeID second_edge_id = FindEdgeInEitherDirection(data.id, v);
if (SPECIAL_EDGEID == second_edge_id)
{
SimpleLogger().Write(logWARNING) << "cannot find second segment of edge ("
<< u << "," << data.id << "," << v
<< "), eid: " << eid;
BOOST_ASSERT(false);
}
}
}
p.printIncrement();
}
#endif
}
unsigned GetNumberOfNodes() const { return number_of_nodes; }
unsigned GetNumberOfEdges() const { return number_of_edges; }
unsigned GetOutDegree(const NodeIterator n) const { return EndEdges(n) - BeginEdges(n); }
inline NodeIterator GetTarget(const EdgeIterator e) const
{
return NodeIterator(edge_array[e].target);
}
inline EdgeDataT &GetEdgeData(const EdgeIterator e) { return edge_array[e].data; }
const EdgeDataT &GetEdgeData(const EdgeIterator e) const { return edge_array[e].data; }
EdgeIterator BeginEdges(const NodeIterator n) const
{
return EdgeIterator(node_array.at(n).first_edge);
}
EdgeIterator EndEdges(const NodeIterator n) const
{
return EdgeIterator(node_array.at(n + 1).first_edge);
}
// searches for a specific edge
EdgeIterator FindEdge(const NodeIterator from, const NodeIterator to) const
{
EdgeIterator smallest_edge = SPECIAL_EDGEID;
EdgeWeight smallest_weight = INVALID_EDGE_WEIGHT;
for (auto edge : GetAdjacentEdgeRange(from))
{
const NodeID target = GetTarget(edge);
const EdgeWeight weight = GetEdgeData(edge).distance;
if (target == to && weight < smallest_weight)
{
smallest_edge = edge;
smallest_weight = weight;
}
}
return smallest_edge;
}
EdgeIterator FindEdgeInEitherDirection(const NodeIterator from, const NodeIterator to) const
{
EdgeIterator tmp = FindEdge(from, to);
return (SPECIAL_NODEID != tmp ? tmp : FindEdge(to, from));
}
EdgeIterator
FindEdgeIndicateIfReverse(const NodeIterator from, const NodeIterator to, bool &result) const
{
EdgeIterator current_iterator = FindEdge(from, to);
if (SPECIAL_NODEID == current_iterator)
{
current_iterator = FindEdge(to, from);
if (SPECIAL_NODEID != current_iterator)
{
result = true;
}
}
return current_iterator;
}
private:
NodeIterator number_of_nodes;
EdgeIterator number_of_edges;
typename ShM<NodeArrayEntry, UseSharedMemory>::vector node_array;
typename ShM<EdgeArrayEntry, UseSharedMemory>::vector edge_array;
};
#endif // STATIC_GRAPH_H
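
// Hypothetical usage sketch (illustration only, based on the definitions
// above): a minimal edge payload carrying the "distance" field the constructor
// asserts on, and a two-node graph with a single directed edge; all values are
// invented.
struct ExampleEdgeData
{
    int distance;
};

inline void StaticGraphExample()
{
    typedef StaticGraph<ExampleEdgeData> ExampleGraph;
    std::vector<ExampleGraph::InputEdge> edges(1);
    edges[0].source = 0;
    edges[0].target = 1;
    edges[0].data.distance = 10;

    ExampleGraph graph(/*nodes*/ 2, edges); // sorts edges and builds the CSR arrays
    for (const auto edge_id : graph.GetAdjacentEdgeRange(0))
    {
        const NodeID target = graph.GetTarget(edge_id);         // 1
        const int weight = graph.GetEdgeData(edge_id).distance; // 10
        (void)target;
        (void)weight;
    }
}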

View File

@ -0,0 +1,260 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// KD Tree coded by Christian Vetter, Monav Project
#ifndef STATICKDTREE_H_INCLUDED
#define STATICKDTREE_H_INCLUDED
#include <boost/assert.hpp>
#include <vector>
#include <algorithm>
#include <stack>
#include <limits>
namespace KDTree
{
#define KDTREE_BASESIZE (8)
template <unsigned k, typename T> class BoundingBox
{
public:
BoundingBox()
{
for (unsigned dim = 0; dim < k; ++dim)
{
min[dim] = std::numeric_limits<T>::min();
max[dim] = std::numeric_limits<T>::max();
}
}
T min[k];
T max[k];
};
struct NoData
{
};
template <unsigned k, typename T> class EuclidianMetric
{
public:
double operator()(const T left[k], const T right[k])
{
double result = 0;
for (unsigned i = 0; i < k; ++i)
{
double temp = (double)left[i] - (double)right[i];
result += temp * temp;
}
return result;
}
double operator()(const BoundingBox<k, T> &box, const T point[k])
{
T nearest[k];
for (unsigned dim = 0; dim < k; ++dim)
{
if (point[dim] < box.min[dim])
nearest[dim] = box.min[dim];
else if (point[dim] > box.max[dim])
nearest[dim] = box.max[dim];
else
nearest[dim] = point[dim];
}
return operator()(point, nearest);
}
};
template <unsigned k, typename T, typename Data = NoData, typename Metric = EuclidianMetric<k, T>>
class StaticKDTree
{
public:
struct InputPoint
{
T coordinates[k];
Data data;
bool operator==(const InputPoint &right)
{
for (int i = 0; i < k; i++)
{
if (coordinates[i] != right.coordinates[i])
return false;
}
return true;
}
};
explicit StaticKDTree(std::vector<InputPoint> *points)
{
BOOST_ASSERT(k > 0);
BOOST_ASSERT(points->size() > 0);
size = points->size();
kdtree = new InputPoint[size];
for (Iterator i = 0; i != size; ++i)
{
kdtree[i] = points->at(i);
for (unsigned dim = 0; dim < k; ++dim)
{
if (kdtree[i].coordinates[dim] < boundingBox.min[dim])
boundingBox.min[dim] = kdtree[i].coordinates[dim];
if (kdtree[i].coordinates[dim] > boundingBox.max[dim])
boundingBox.max[dim] = kdtree[i].coordinates[dim];
}
}
std::stack<Tree> s;
s.push(Tree(0, size, 0));
while (!s.empty())
{
Tree tree = s.top();
s.pop();
if (tree.right - tree.left < KDTREE_BASESIZE)
continue;
Iterator middle = tree.left + (tree.right - tree.left) / 2;
std::nth_element(
kdtree + tree.left, kdtree + middle, kdtree + tree.right, Less(tree.dimension));
s.push(Tree(tree.left, middle, (tree.dimension + 1) % k));
s.push(Tree(middle + 1, tree.right, (tree.dimension + 1) % k));
}
}
~StaticKDTree() { delete[] kdtree; }
bool NearestNeighbor(InputPoint *result, const InputPoint &point)
{
Metric distance;
bool found = false;
double nearestDistance = std::numeric_limits<T>::max();
std::stack<NNTree> s;
s.push(NNTree(0, size, 0, boundingBox));
while (!s.empty())
{
NNTree tree = s.top();
s.pop();
if (distance(tree.box, point.coordinates) >= nearestDistance)
continue;
if (tree.right - tree.left < KDTREE_BASESIZE)
{
for (unsigned i = tree.left; i < tree.right; i++)
{
double newDistance = distance(kdtree[i].coordinates, point.coordinates);
if (newDistance < nearestDistance)
{
nearestDistance = newDistance;
*result = kdtree[i];
found = true;
}
}
continue;
}
Iterator middle = tree.left + (tree.right - tree.left) / 2;
double newDistance = distance(kdtree[middle].coordinates, point.coordinates);
if (newDistance < nearestDistance)
{
nearestDistance = newDistance;
*result = kdtree[middle];
found = true;
}
Less comparator(tree.dimension);
if (!comparator(point, kdtree[middle]))
{
NNTree first(middle + 1, tree.right, (tree.dimension + 1) % k, tree.box);
NNTree second(tree.left, middle, (tree.dimension + 1) % k, tree.box);
first.box.min[tree.dimension] = kdtree[middle].coordinates[tree.dimension];
second.box.max[tree.dimension] = kdtree[middle].coordinates[tree.dimension];
s.push(second);
s.push(first);
}
else
{
NNTree first(middle + 1, tree.right, (tree.dimension + 1) % k, tree.box);
NNTree second(tree.left, middle, (tree.dimension + 1) % k, tree.box);
first.box.min[tree.dimension] = kdtree[middle].coordinates[tree.dimension];
second.box.max[tree.dimension] = kdtree[middle].coordinates[tree.dimension];
s.push(first);
s.push(second);
}
}
return found;
}
private:
typedef unsigned Iterator;
struct Tree
{
Iterator left;
Iterator right;
unsigned dimension;
Tree() {}
Tree(Iterator l, Iterator r, unsigned d) : left(l), right(r), dimension(d) {}
};
struct NNTree
{
Iterator left;
Iterator right;
unsigned dimension;
BoundingBox<k, T> box;
NNTree() {}
NNTree(Iterator l, Iterator r, unsigned d, const BoundingBox<k, T> &b)
: left(l), right(r), dimension(d), box(b)
{
}
};
class Less
{
public:
explicit Less(unsigned d)
{
dimension = d;
BOOST_ASSERT(dimension < k);
}
bool operator()(const InputPoint &left, const InputPoint &right)
{
BOOST_ASSERT(dimension < k);
return left.coordinates[dimension] < right.coordinates[dimension];
}
private:
unsigned dimension;
};
BoundingBox<k, T> boundingBox;
InputPoint *kdtree;
Iterator size;
};
}
#endif // STATICKDTREE_H_INCLUDED
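
// Hypothetical usage sketch (illustration only, based on the definitions
// above): a 2-dimensional tree over integer points, queried for the nearest
// neighbour of an arbitrary point; the coordinates are invented.
inline void StaticKDTreeExample()
{
    typedef KDTree::StaticKDTree<2, int> Tree2D;
    std::vector<Tree2D::InputPoint> points(3);
    points[0].coordinates[0] = 0; points[0].coordinates[1] = 0;
    points[1].coordinates[0] = 5; points[1].coordinates[1] = 5;
    points[2].coordinates[0] = 9; points[2].coordinates[1] = 1;

    Tree2D tree(&points); // copies the points into the tree's internal array

    Tree2D::InputPoint query, nearest;
    query.coordinates[0] = 6;
    query.coordinates[1] = 4;
    if (tree.NearestNeighbor(&nearest, query))
    {
        (void)nearest; // the closest point is (5, 5)
    }
}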

View File

@ -0,0 +1,781 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef STATICRTREE_H
#define STATICRTREE_H
#include "DeallocatingVector.h"
#include "HilbertValue.h"
#include "PhantomNodes.h"
#include "QueryNode.h"
#include "SharedMemoryFactory.h"
#include "SharedMemoryVectorWrapper.h"
#include "../Util/MercatorUtil.h"
#include "../Util/OSRMException.h"
#include "../Util/SimpleLogger.h"
#include "../typedefs.h"
#include <osrm/Coordinate.h>
#include <boost/assert.hpp>
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/thread.hpp>
#include <algorithm>
#include <array>
#include <chrono>
#include <climits>
#include <limits>
#include <memory>
#include <queue>
#include <string>
#include <vector>
// tuning parameters
const static uint32_t RTREE_BRANCHING_FACTOR = 64;
const static uint32_t RTREE_LEAF_NODE_SIZE = 1024;
static boost::thread_specific_ptr<boost::filesystem::ifstream> thread_local_rtree_stream;
// Implements a static, i.e. packed, R-tree
template <class DataT,
class CoordinateListT = std::vector<FixedPointCoordinate>,
bool UseSharedMemory = false>
class StaticRTree
{
public:
struct RectangleInt2D
{
RectangleInt2D() : min_lon(INT_MAX), max_lon(INT_MIN), min_lat(INT_MAX), max_lat(INT_MIN) {}
int32_t min_lon, max_lon;
int32_t min_lat, max_lat;
inline void InitializeMBRectangle(const std::array<DataT, RTREE_LEAF_NODE_SIZE> &objects,
const uint32_t element_count,
const std::vector<NodeInfo> &coordinate_list)
{
for (uint32_t i = 0; i < element_count; ++i)
{
min_lon = std::min(min_lon,
std::min(coordinate_list.at(objects[i].u).lon,
coordinate_list.at(objects[i].v).lon));
max_lon = std::max(max_lon,
std::max(coordinate_list.at(objects[i].u).lon,
coordinate_list.at(objects[i].v).lon));
min_lat = std::min(min_lat,
std::min(coordinate_list.at(objects[i].u).lat,
coordinate_list.at(objects[i].v).lat));
max_lat = std::max(max_lat,
std::max(coordinate_list.at(objects[i].u).lat,
coordinate_list.at(objects[i].v).lat));
}
}
inline void AugmentMBRectangle(const RectangleInt2D &other)
{
min_lon = std::min(min_lon, other.min_lon);
max_lon = std::max(max_lon, other.max_lon);
min_lat = std::min(min_lat, other.min_lat);
max_lat = std::max(max_lat, other.max_lat);
}
inline FixedPointCoordinate Centroid() const
{
FixedPointCoordinate centroid;
// The coordinates of the midpoints are given by:
// x = (x1 + x2) /2 and y = (y1 + y2) /2.
centroid.lon = (min_lon + max_lon) / 2;
centroid.lat = (min_lat + max_lat) / 2;
return centroid;
}
inline bool Intersects(const RectangleInt2D &other) const
{
FixedPointCoordinate upper_left(other.max_lat, other.min_lon);
FixedPointCoordinate upper_right(other.max_lat, other.max_lon);
FixedPointCoordinate lower_right(other.min_lat, other.max_lon);
FixedPointCoordinate lower_left(other.min_lat, other.min_lon);
return (Contains(upper_left) || Contains(upper_right) || Contains(lower_right) ||
Contains(lower_left));
}
inline float GetMinDist(const FixedPointCoordinate &location) const
{
bool is_contained = Contains(location);
if (is_contained)
{
return 0.;
}
float min_dist = std::numeric_limits<float>::max();
min_dist = std::min(min_dist,
FixedPointCoordinate::ApproximateEuclideanDistance(
location.lat, location.lon, max_lat, min_lon));
min_dist = std::min(min_dist,
FixedPointCoordinate::ApproximateEuclideanDistance(
location.lat, location.lon, max_lat, max_lon));
min_dist = std::min(min_dist,
FixedPointCoordinate::ApproximateEuclideanDistance(
location.lat, location.lon, min_lat, max_lon));
min_dist = std::min(min_dist,
FixedPointCoordinate::ApproximateEuclideanDistance(
location.lat, location.lon, min_lat, min_lon));
return min_dist;
}
inline float GetMinMaxDist(const FixedPointCoordinate &location) const
{
float min_max_dist = std::numeric_limits<float>::max();
// Get minmax distance to each of the four sides
FixedPointCoordinate upper_left(max_lat, min_lon);
FixedPointCoordinate upper_right(max_lat, max_lon);
FixedPointCoordinate lower_right(min_lat, max_lon);
FixedPointCoordinate lower_left(min_lat, min_lon);
min_max_dist = std::min(
min_max_dist,
std::max(
FixedPointCoordinate::ApproximateEuclideanDistance(location, upper_left),
FixedPointCoordinate::ApproximateEuclideanDistance(location, upper_right)));
min_max_dist = std::min(
min_max_dist,
std::max(
FixedPointCoordinate::ApproximateEuclideanDistance(location, upper_right),
FixedPointCoordinate::ApproximateEuclideanDistance(location, lower_right)));
min_max_dist = std::min(
min_max_dist,
std::max(FixedPointCoordinate::ApproximateEuclideanDistance(location, lower_right),
FixedPointCoordinate::ApproximateEuclideanDistance(location, lower_left)));
min_max_dist = std::min(
min_max_dist,
std::max(FixedPointCoordinate::ApproximateEuclideanDistance(location, lower_left),
FixedPointCoordinate::ApproximateEuclideanDistance(location, upper_left)));
return min_max_dist;
}
inline bool Contains(const FixedPointCoordinate &location) const
{
const bool lats_contained = (location.lat > min_lat) && (location.lat < max_lat);
const bool lons_contained = (location.lon > min_lon) && (location.lon < max_lon);
return lats_contained && lons_contained;
}
inline friend std::ostream &operator<<(std::ostream &out, const RectangleInt2D &rect)
{
out << rect.min_lat / COORDINATE_PRECISION << "," << rect.min_lon / COORDINATE_PRECISION
<< " " << rect.max_lat / COORDINATE_PRECISION << ","
<< rect.max_lon / COORDINATE_PRECISION;
return out;
}
};
typedef RectangleInt2D RectangleT;
struct TreeNode
{
TreeNode() : child_count(0), child_is_on_disk(false) {}
RectangleT minimum_bounding_rectangle;
uint32_t child_count : 31;
bool child_is_on_disk : 1;
uint32_t children[RTREE_BRANCHING_FACTOR];
};
private:
struct WrappedInputElement
{
explicit WrappedInputElement(const uint32_t _array_index, const uint64_t _hilbert_value)
: m_array_index(_array_index), m_hilbert_value(_hilbert_value)
{
}
WrappedInputElement() : m_array_index(UINT_MAX), m_hilbert_value(0) {}
uint32_t m_array_index;
uint64_t m_hilbert_value;
inline bool operator<(const WrappedInputElement &other) const
{
return m_hilbert_value < other.m_hilbert_value;
}
};
struct LeafNode
{
LeafNode() : object_count(0) {}
uint32_t object_count;
std::array<DataT, RTREE_LEAF_NODE_SIZE> objects;
};
struct QueryCandidate
{
explicit QueryCandidate(const uint32_t n_id, const float dist)
: node_id(n_id), min_dist(dist)
{
}
QueryCandidate() : node_id(UINT_MAX), min_dist(std::numeric_limits<float>::max()) {}
uint32_t node_id;
float min_dist;
inline bool operator<(const QueryCandidate &other) const
{
return min_dist < other.min_dist;
}
};
typename ShM<TreeNode, UseSharedMemory>::vector m_search_tree;
uint64_t m_element_count;
const std::string m_leaf_node_filename;
std::shared_ptr<CoordinateListT> m_coordinate_list;
public:
StaticRTree() = delete;
StaticRTree(const StaticRTree &) = delete;
// Construct a packed Hilbert R-tree with the Kamel-Faloutsos algorithm [1]
explicit StaticRTree(std::vector<DataT> &input_data_vector,
const std::string tree_node_filename,
const std::string leaf_node_filename,
const std::vector<NodeInfo> &coordinate_list)
: m_element_count(input_data_vector.size()), m_leaf_node_filename(leaf_node_filename)
{
SimpleLogger().Write() << "constructing r-tree of " << m_element_count
<< " edge elements build on-top of " << coordinate_list.size()
<< " coordinates";
std::chrono::time_point<std::chrono::steady_clock> time0 = std::chrono::steady_clock::now();
std::vector<WrappedInputElement> input_wrapper_vector(m_element_count);
HilbertCode get_hilbert_number;
// generate auxiliary vector of hilbert-values
#pragma omp parallel for schedule(guided)
for (uint64_t element_counter = 0; element_counter < m_element_count; ++element_counter)
{
input_wrapper_vector[element_counter].m_array_index = element_counter;
// Get Hilbert value for the centroid in Mercator projection
DataT const &current_element = input_data_vector[element_counter];
FixedPointCoordinate current_centroid =
DataT::Centroid(FixedPointCoordinate(coordinate_list.at(current_element.u).lat,
coordinate_list.at(current_element.u).lon),
FixedPointCoordinate(coordinate_list.at(current_element.v).lat,
coordinate_list.at(current_element.v).lon));
current_centroid.lat =
COORDINATE_PRECISION * lat2y(current_centroid.lat / COORDINATE_PRECISION);
uint64_t current_hilbert_value = get_hilbert_number(current_centroid);
input_wrapper_vector[element_counter].m_hilbert_value = current_hilbert_value;
}
// open leaf file
boost::filesystem::ofstream leaf_node_file(leaf_node_filename, std::ios::binary);
leaf_node_file.write((char *)&m_element_count, sizeof(uint64_t));
// sort the hilbert-value representatives
std::sort(input_wrapper_vector.begin(), input_wrapper_vector.end());
std::vector<TreeNode> tree_nodes_in_level;
// pack M elements into leaf node and write to leaf file
uint64_t processed_objects_count = 0;
while (processed_objects_count < m_element_count)
{
LeafNode current_leaf;
TreeNode current_node;
// SimpleLogger().Write() << "reading " << tree_size << " tree nodes in " <<
// (sizeof(TreeNode)*tree_size) << " bytes";
for (uint32_t current_element_index = 0; RTREE_LEAF_NODE_SIZE > current_element_index;
++current_element_index)
{
if (m_element_count > (processed_objects_count + current_element_index))
{
uint32_t index_of_next_object =
input_wrapper_vector[processed_objects_count + current_element_index]
.m_array_index;
current_leaf.objects[current_element_index] =
input_data_vector[index_of_next_object];
++current_leaf.object_count;
}
}
// generate a tree node that represents the objects in the leaf and store it for the next level
current_node.minimum_bounding_rectangle.InitializeMBRectangle(
current_leaf.objects, current_leaf.object_count, coordinate_list);
current_node.child_is_on_disk = true;
current_node.children[0] = tree_nodes_in_level.size();
tree_nodes_in_level.emplace_back(current_node);
// write leaf_node to leaf node file
leaf_node_file.write((char *)&current_leaf, sizeof(current_leaf));
processed_objects_count += current_leaf.object_count;
}
// close leaf file
leaf_node_file.close();
uint32_t processing_level = 0;
while (1 < tree_nodes_in_level.size())
{
std::vector<TreeNode> tree_nodes_in_next_level;
uint32_t processed_tree_nodes_in_level = 0;
while (processed_tree_nodes_in_level < tree_nodes_in_level.size())
{
TreeNode parent_node;
// pack RTREE_BRANCHING_FACTOR elements into tree_nodes each
for (uint32_t current_child_node_index = 0;
RTREE_BRANCHING_FACTOR > current_child_node_index;
++current_child_node_index)
{
if (processed_tree_nodes_in_level < tree_nodes_in_level.size())
{
TreeNode &current_child_node =
tree_nodes_in_level[processed_tree_nodes_in_level];
// add tree node to parent entry
parent_node.children[current_child_node_index] = m_search_tree.size();
m_search_tree.emplace_back(current_child_node);
// augment MBR of parent
parent_node.minimum_bounding_rectangle.AugmentMBRectangle(
current_child_node.minimum_bounding_rectangle);
// increase counters
++parent_node.child_count;
++processed_tree_nodes_in_level;
}
}
tree_nodes_in_next_level.emplace_back(parent_node);
}
tree_nodes_in_level.swap(tree_nodes_in_next_level);
++processing_level;
}
BOOST_ASSERT_MSG(1 == tree_nodes_in_level.size(), "tree broken, more than one root node");
// last remaining entry is the root node, store it
m_search_tree.emplace_back(tree_nodes_in_level[0]);
// reverse and renumber tree to have root at index 0
std::reverse(m_search_tree.begin(), m_search_tree.end());
#pragma omp parallel for schedule(guided)
for (uint32_t i = 0; i < m_search_tree.size(); ++i)
{
TreeNode &current_tree_node = m_search_tree[i];
for (uint32_t j = 0; j < current_tree_node.child_count; ++j)
{
const uint32_t old_id = current_tree_node.children[j];
const uint32_t new_id = m_search_tree.size() - old_id - 1;
current_tree_node.children[j] = new_id;
}
}
// open tree file
boost::filesystem::ofstream tree_node_file(tree_node_filename, std::ios::binary);
uint32_t size_of_tree = m_search_tree.size();
BOOST_ASSERT_MSG(0 < size_of_tree, "tree empty");
tree_node_file.write((char *)&size_of_tree, sizeof(uint32_t));
tree_node_file.write((char *)&m_search_tree[0], sizeof(TreeNode) * size_of_tree);
// close tree node file.
tree_node_file.close();
std::chrono::time_point<std::chrono::steady_clock> time1 = std::chrono::steady_clock::now();
std::chrono::duration<double> elapsed_seconds = time1 - time0;
SimpleLogger().Write() << "finished r-tree construction in " << (elapsed_seconds.count())
<< " seconds";
}
// Read-only operation for queries
explicit StaticRTree(const boost::filesystem::path &node_file,
const boost::filesystem::path &leaf_file,
const std::shared_ptr<CoordinateListT> coordinate_list)
: m_leaf_node_filename(leaf_file.string())
{
// open tree node file and load into RAM.
m_coordinate_list = coordinate_list;
if (!boost::filesystem::exists(node_file))
{
throw OSRMException("ram index file does not exist");
}
if (0 == boost::filesystem::file_size(node_file))
{
throw OSRMException("ram index file is empty");
}
boost::filesystem::ifstream tree_node_file(node_file, std::ios::binary);
uint32_t tree_size = 0;
tree_node_file.read((char *)&tree_size, sizeof(uint32_t));
m_search_tree.resize(tree_size);
tree_node_file.read((char *)&m_search_tree[0], sizeof(TreeNode) * tree_size);
tree_node_file.close();
// open leaf node file and store thread specific pointer
if (!boost::filesystem::exists(leaf_file))
{
throw OSRMException("mem index file does not exist");
}
if (0 == boost::filesystem::file_size(leaf_file))
{
throw OSRMException("mem index file is empty");
}
boost::filesystem::ifstream leaf_node_file(leaf_file, std::ios::binary);
leaf_node_file.read((char *)&m_element_count, sizeof(uint64_t));
leaf_node_file.close();
// SimpleLogger().Write() << tree_size << " nodes in search tree";
// SimpleLogger().Write() << m_element_count << " elements in leafs";
}
explicit StaticRTree(TreeNode *tree_node_ptr,
const uint32_t number_of_nodes,
const boost::filesystem::path &leaf_file,
std::shared_ptr<CoordinateListT> coordinate_list)
: m_search_tree(tree_node_ptr, number_of_nodes), m_leaf_node_filename(leaf_file.string()),
m_coordinate_list(coordinate_list)
{
// open leaf node file and store thread specific pointer
if (!boost::filesystem::exists(leaf_file))
{
throw OSRMException("mem index file does not exist");
}
if (0 == boost::filesystem::file_size(leaf_file))
{
throw OSRMException("mem index file is empty");
}
boost::filesystem::ifstream leaf_node_file(leaf_file, std::ios::binary);
leaf_node_file.read((char *)&m_element_count, sizeof(uint64_t));
leaf_node_file.close();
if (thread_local_rtree_stream.get())
{
thread_local_rtree_stream->close();
}
// SimpleLogger().Write() << tree_size << " nodes in search tree";
// SimpleLogger().Write() << m_element_count << " elements in leafs";
}
// Read-only operation for queries
bool LocateClosestEndPointForCoordinate(const FixedPointCoordinate &input_coordinate,
FixedPointCoordinate &result_coordinate,
const unsigned zoom_level)
{
bool ignore_tiny_components = (zoom_level <= 14);
DataT nearest_edge;
float min_dist = std::numeric_limits<float>::max();
float min_max_dist = std::numeric_limits<float>::max();
bool found_a_nearest_edge = false;
// initialize queue with root element
std::priority_queue<QueryCandidate> traversal_queue;
float current_min_dist =
m_search_tree[0].minimum_bounding_rectangle.GetMinDist(input_coordinate);
traversal_queue.emplace(0, current_min_dist);
while (!traversal_queue.empty())
{
const QueryCandidate current_query_node = traversal_queue.top();
traversal_queue.pop();
const bool prune_downward = (current_query_node.min_dist >= min_max_dist);
const bool prune_upward = (current_query_node.min_dist >= min_dist);
if (!prune_downward && !prune_upward)
{ // downward pruning
TreeNode &current_tree_node = m_search_tree[current_query_node.node_id];
if (current_tree_node.child_is_on_disk)
{
LeafNode current_leaf_node;
LoadLeafFromDisk(current_tree_node.children[0], current_leaf_node);
for (uint32_t i = 0; i < current_leaf_node.object_count; ++i)
{
DataT const &current_edge = current_leaf_node.objects[i];
if (ignore_tiny_components && current_edge.is_in_tiny_cc)
{
continue;
}
float current_minimum_distance =
FixedPointCoordinate::ApproximateEuclideanDistance(
input_coordinate.lat,
input_coordinate.lon,
m_coordinate_list->at(current_edge.u).lat,
m_coordinate_list->at(current_edge.u).lon);
if (current_minimum_distance < min_dist)
{
// found a new minimum
min_dist = current_minimum_distance;
result_coordinate.lat = m_coordinate_list->at(current_edge.u).lat;
result_coordinate.lon = m_coordinate_list->at(current_edge.u).lon;
found_a_nearest_edge = true;
}
current_minimum_distance =
FixedPointCoordinate::ApproximateEuclideanDistance(
input_coordinate.lat,
input_coordinate.lon,
m_coordinate_list->at(current_edge.v).lat,
m_coordinate_list->at(current_edge.v).lon);
if (current_minimum_distance < min_dist)
{
// found a new minimum
min_dist = current_minimum_distance;
result_coordinate.lat = m_coordinate_list->at(current_edge.v).lat;
result_coordinate.lon = m_coordinate_list->at(current_edge.v).lon;
found_a_nearest_edge = true;
}
}
}
else
{
// traverse children, prune if global mindist is smaller than local one
for (uint32_t i = 0; i < current_tree_node.child_count; ++i)
{
const int32_t child_id = current_tree_node.children[i];
const TreeNode &child_tree_node = m_search_tree[child_id];
const RectangleT &child_rectangle =
child_tree_node.minimum_bounding_rectangle;
const float current_min_dist =
child_rectangle.GetMinDist(input_coordinate);
const float current_min_max_dist =
child_rectangle.GetMinMaxDist(input_coordinate);
if (current_min_max_dist < min_max_dist)
{
min_max_dist = current_min_max_dist;
}
if (current_min_dist > min_max_dist)
{
continue;
}
if (current_min_dist > min_dist)
{ // upward pruning
continue;
}
traversal_queue.emplace(child_id, current_min_dist);
}
}
}
}
return found_a_nearest_edge;
}
bool FindPhantomNodeForCoordinate(const FixedPointCoordinate &input_coordinate,
PhantomNode &result_phantom_node,
const unsigned zoom_level)
{
// SimpleLogger().Write() << "searching for coordinate " << input_coordinate;
const bool ignore_tiny_components = (zoom_level <= 14);
DataT nearest_edge;
float min_dist = std::numeric_limits<float>::max();
float min_max_dist = std::numeric_limits<float>::max();
bool found_a_nearest_edge = false;
FixedPointCoordinate nearest, current_start_coordinate, current_end_coordinate;
// initialize queue with root element
std::priority_queue<QueryCandidate> traversal_queue;
float current_min_dist =
m_search_tree[0].minimum_bounding_rectangle.GetMinDist(input_coordinate);
traversal_queue.emplace(0, current_min_dist);
BOOST_ASSERT_MSG(std::numeric_limits<float>::epsilon() >
(0. - traversal_queue.top().min_dist),
"Root element in NN Search has min dist != 0.");
LeafNode current_leaf_node;
while (!traversal_queue.empty())
{
const QueryCandidate current_query_node = traversal_queue.top();
traversal_queue.pop();
const bool prune_downward = (current_query_node.min_dist >= min_max_dist);
const bool prune_upward = (current_query_node.min_dist >= min_dist);
if (!prune_downward && !prune_upward)
{ // downward pruning
const TreeNode &current_tree_node = m_search_tree[current_query_node.node_id];
if (current_tree_node.child_is_on_disk)
{
LoadLeafFromDisk(current_tree_node.children[0], current_leaf_node);
for (uint32_t i = 0; i < current_leaf_node.object_count; ++i)
{
DataT &current_edge = current_leaf_node.objects[i];
if (ignore_tiny_components && current_edge.is_in_tiny_cc)
{
continue;
}
float current_ratio = 0.;
const float current_perpendicular_distance =
FixedPointCoordinate::ComputePerpendicularDistance(
m_coordinate_list->at(current_edge.u),
m_coordinate_list->at(current_edge.v),
input_coordinate,
nearest,
current_ratio);
BOOST_ASSERT(0. <= current_perpendicular_distance);
if ((current_perpendicular_distance < min_dist) &&
!EpsilonCompare(current_perpendicular_distance, min_dist))
{ // found a new minimum
min_dist = current_perpendicular_distance;
// TODO: use assignment c'tor in PhantomNode
result_phantom_node.forward_node_id =
current_edge.forward_edge_based_node_id;
result_phantom_node.reverse_node_id =
current_edge.reverse_edge_based_node_id;
result_phantom_node.name_id = current_edge.name_id;
result_phantom_node.forward_weight = current_edge.forward_weight;
result_phantom_node.reverse_weight = current_edge.reverse_weight;
result_phantom_node.forward_offset = current_edge.forward_offset;
result_phantom_node.reverse_offset = current_edge.reverse_offset;
result_phantom_node.packed_geometry_id =
current_edge.packed_geometry_id;
result_phantom_node.fwd_segment_position =
current_edge.fwd_segment_position;
result_phantom_node.location = nearest;
current_start_coordinate.lat =
m_coordinate_list->at(current_edge.u).lat;
current_start_coordinate.lon =
m_coordinate_list->at(current_edge.u).lon;
current_end_coordinate.lat = m_coordinate_list->at(current_edge.v).lat;
current_end_coordinate.lon = m_coordinate_list->at(current_edge.v).lon;
nearest_edge = current_edge;
found_a_nearest_edge = true;
}
}
}
else
{
// traverse children, prune if global mindist is smaller than local one
for (uint32_t i = 0; i < current_tree_node.child_count; ++i)
{
const int32_t child_id = current_tree_node.children[i];
TreeNode &child_tree_node = m_search_tree[child_id];
RectangleT &child_rectangle = child_tree_node.minimum_bounding_rectangle;
const float current_min_dist =
child_rectangle.GetMinDist(input_coordinate);
const float current_min_max_dist =
child_rectangle.GetMinMaxDist(input_coordinate);
if (current_min_max_dist < min_max_dist)
{
min_max_dist = current_min_max_dist;
}
if (current_min_dist > min_max_dist)
{
continue;
}
if (current_min_dist > min_dist)
{ // upward pruning
continue;
}
traversal_queue.emplace(child_id, current_min_dist);
}
}
}
}
// Hack to fix rounding errors and wandering via nodes.
if (1 == std::abs(input_coordinate.lon - result_phantom_node.location.lon))
{
result_phantom_node.location.lon = input_coordinate.lon;
}
if (1 == std::abs(input_coordinate.lat - result_phantom_node.location.lat))
{
result_phantom_node.location.lat = input_coordinate.lat;
}
float ratio = 0.f;
if (found_a_nearest_edge)
{
const float distance_1 = FixedPointCoordinate::ApproximateEuclideanDistance(
current_start_coordinate, result_phantom_node.location);
const float distance_2 = FixedPointCoordinate::ApproximateEuclideanDistance(
current_start_coordinate, current_end_coordinate);
ratio = distance_1 / distance_2;
ratio = std::min(1.f, ratio);
if (SPECIAL_NODEID != result_phantom_node.forward_node_id)
{
result_phantom_node.forward_weight *= ratio;
}
if (SPECIAL_NODEID != result_phantom_node.reverse_node_id)
{
result_phantom_node.reverse_weight *= (1. - ratio);
}
}
return found_a_nearest_edge;
}
private:
inline void LoadLeafFromDisk(const uint32_t leaf_id, LeafNode &result_node)
{
if (!thread_local_rtree_stream.get() || !thread_local_rtree_stream->is_open())
{
thread_local_rtree_stream.reset(new boost::filesystem::ifstream(
m_leaf_node_filename, std::ios::in | std::ios::binary));
}
if (!thread_local_rtree_stream->good())
{
thread_local_rtree_stream->clear(std::ios::goodbit);
SimpleLogger().Write(logDEBUG) << "Resetting stale filestream";
}
uint64_t seek_pos = sizeof(uint64_t) + leaf_id * sizeof(LeafNode);
thread_local_rtree_stream->seekg(seek_pos);
thread_local_rtree_stream->read((char *)&result_node, sizeof(LeafNode));
}
inline bool EdgesAreEquivalent(const FixedPointCoordinate &a,
const FixedPointCoordinate &b,
const FixedPointCoordinate &c,
const FixedPointCoordinate &d) const
{
return (a == b && c == d) || (a == c && b == d) || (a == d && b == c);
}
template<typename FloatT>
inline bool EpsilonCompare(const FloatT d1, const FloatT d2) const
{
return (std::abs(d1 - d2) < std::numeric_limits<FloatT>::epsilon());
}
};
//[1] "On Packing R-Trees"; I. Kamel, C. Faloutsos; 1993; DOI: 10.1145/170088.170403
//[2] "Nearest Neighbor Queries", N. Roussopulos et al; 1995; DOI: 10.1145/223784.223794
#endif // STATICRTREE_H
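The constructor above builds the packed Hilbert R-tree of Kamel and Faloutsos [1]: each edge is tagged with the Hilbert value of its centroid, the tags are sorted, and consecutive runs of RTREE_LEAF_NODE_SIZE elements become one leaf. Below is a minimal, self-contained sketch of just that packing step; the struct and values are illustrative stand-ins for WrappedInputElement and the real Hilbert codes, not the project's API.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Illustrative stand-in for WrappedInputElement: an input index plus its Hilbert value.
struct Wrapped
{
    uint32_t array_index;
    uint64_t hilbert_value;
    bool operator<(const Wrapped &other) const { return hilbert_value < other.hilbert_value; }
};

int main()
{
    const std::size_t leaf_size = 4; // stand-in for RTREE_LEAF_NODE_SIZE
    // invented Hilbert values for ten input elements
    std::vector<Wrapped> input = {{0, 42}, {1, 7},  {2, 99}, {3, 13}, {4, 56},
                                  {5, 3},  {6, 77}, {7, 21}, {8, 64}, {9, 88}};
    // sorting by Hilbert value keeps spatially close elements in the same leaf
    std::sort(input.begin(), input.end());
    // pack consecutive runs of leaf_size sorted elements into one leaf each
    for (std::size_t first = 0; first < input.size(); first += leaf_size)
    {
        const std::size_t last = std::min(first + leaf_size, input.size());
        std::cout << "leaf:";
        for (std::size_t i = first; i < last; ++i)
        {
            std::cout << ' ' << input[i].array_index;
        }
        std::cout << '\n';
    }
    return 0;
}

The real constructor additionally writes every leaf to disk, keeps one TreeNode per leaf, and then repeats the packing RTREE_BRANCHING_FACTOR children at a time to build the upper levels of the tree.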


@ -0,0 +1,90 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TURN_INSTRUCTIONS_H
#define TURN_INSTRUCTIONS_H
enum class TurnInstruction : unsigned char
{
NoTurn = 0, GoStraight, TurnSlightRight, TurnRight, TurnSharpRight, UTurn,
TurnSharpLeft, TurnLeft, TurnSlightLeft, ReachViaPoint, HeadOn, EnterRoundAbout,
LeaveRoundAbout, StayOnRoundAbout, StartAtEndOfStreet, ReachedYourDestination,
EnterAgainstAllowedDirection, LeaveAgainstAllowedDirection,
InverseAccessRestrictionFlag = 127,
AccessRestrictionFlag = 128,
AccessRestrictionPenalty = 129
};
struct TurnInstructionsClass
{
TurnInstructionsClass() = delete;
TurnInstructionsClass(const TurnInstructionsClass&) = delete;
static inline TurnInstruction GetTurnDirectionOfInstruction(const double angle)
{
if (angle >= 23 && angle < 67)
{
return TurnInstruction::TurnSharpRight;
}
if (angle >= 67 && angle < 113)
{
return TurnInstruction::TurnRight;
}
if (angle >= 113 && angle < 158)
{
return TurnInstruction::TurnSlightRight;
}
if (angle >= 158 && angle < 202)
{
return TurnInstruction::GoStraight;
}
if (angle >= 202 && angle < 248)
{
return TurnInstruction::TurnSlightLeft;
}
if (angle >= 248 && angle < 292)
{
return TurnInstruction::TurnLeft;
}
if (angle >= 292 && angle < 336)
{
return TurnInstruction::TurnSharpLeft;
}
return TurnInstruction::UTurn;
}
static inline bool TurnIsNecessary(const TurnInstruction turn_instruction)
{
if (TurnInstruction::NoTurn == turn_instruction || TurnInstruction::StayOnRoundAbout == turn_instruction)
{
return false;
}
return true;
}
};
#endif /* TURN_INSTRUCTIONS_H */
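A short usage sketch for the angle buckets above, assuming the header is available on the include path as TurnInstructions.h (the main function is illustrative): an angle of 90 degrees falls into the [67, 113) bucket and therefore maps to TurnInstruction::TurnRight.

#include "TurnInstructions.h"
#include <iostream>

int main()
{
    const TurnInstruction instruction = TurnInstructionsClass::GetTurnDirectionOfInstruction(90.);
    // prints 3, the numeric value of TurnInstruction::TurnRight in the enum above
    std::cout << static_cast<unsigned>(instruction) << '\n';
    // a real turn, so it survives TurnIsNecessary and shows up in the textual description
    std::cout << std::boolalpha << TurnInstructionsClass::TurnIsNecessary(instruction) << '\n';
    return 0;
}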


@ -0,0 +1,115 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef XOR_FAST_HASH_H
#define XOR_FAST_HASH_H
#include <algorithm>
#include <vector>
/*
This is an implementation of tabulation hashing, which has surprising properties like
universality.
The space requirement is 2 * 2^16 entries of 16 bits each = 256 kb of memory, which fits into the L2 cache.
Evaluation boils down to 10 or fewer assembly instructions on any recent x86 CPU:
1: movq table2(%rip), %rdx
2: movl %edi, %eax
3: movzwl %di, %edi
4: shrl $16, %eax
5: movzwl %ax, %eax
6: movzbl (%rdx,%rax), %eax
7: movq table1(%rip), %rdx
8: xorb (%rdx,%rdi), %al
9: movzbl %al, %eax
10: ret
*/
class XORFastHash
{ // 65k entries
std::vector<unsigned short> table1;
std::vector<unsigned short> table2;
public:
XORFastHash()
{
table1.resize(2 << 16);
table2.resize(2 << 16);
for (unsigned i = 0; i < (2 << 16); ++i)
{
table1[i] = i;
table2[i] = i;
}
std::random_shuffle(table1.begin(), table1.end());
std::random_shuffle(table2.begin(), table2.end());
}
inline unsigned short operator()(const unsigned originalValue) const
{
unsigned short lsb = ((originalValue) & 0xffff);
unsigned short msb = (((originalValue) >> 16) & 0xffff);
return table1[lsb] ^ table2[msb];
}
};
class XORMiniHash
{ // 256 entries
std::vector<unsigned char> table1;
std::vector<unsigned char> table2;
std::vector<unsigned char> table3;
std::vector<unsigned char> table4;
public:
XORMiniHash()
{
table1.resize(1 << 8);
table2.resize(1 << 8);
table3.resize(1 << 8);
table4.resize(1 << 8);
for (unsigned i = 0; i < (1 << 8); ++i)
{
table1[i] = i;
table2[i] = i;
table3[i] = i;
table4[i] = i;
}
std::random_shuffle(table1.begin(), table1.end());
std::random_shuffle(table2.begin(), table2.end());
std::random_shuffle(table3.begin(), table3.end());
std::random_shuffle(table4.begin(), table4.end());
}
unsigned char operator()(const unsigned originalValue) const
{
unsigned char byte1 = ((originalValue) & 0xff);
unsigned char byte2 = ((originalValue >> 8) & 0xff);
unsigned char byte3 = ((originalValue >> 16) & 0xff);
unsigned char byte4 = ((originalValue >> 24) & 0xff);
return table1[byte1] ^ table2[byte2] ^ table3[byte3] ^ table4[byte4];
}
};
#endif // XOR_FAST_HASH_H
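A small usage sketch of the tabulation hash above, assuming the header is included as XORFastHash.h: the 32-bit key is split into its low and high 16-bit halves, each half indexes one shuffled table, and the two lookups are XORed into a 16-bit hash value.

#include "XORFastHash.h"
#include <iostream>

int main()
{
    XORFastHash hash; // the constructor fills and shuffles both lookup tables
    const unsigned node_id = 123456u;
    // the same key always yields the same value within one instance;
    // a freshly constructed instance uses differently shuffled tables
    std::cout << hash(node_id) << '\n';
    std::cout << hash(node_id) << '\n';
    return 0;
}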


@ -0,0 +1,89 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef XOR_FAST_HASH_STORAGE_H
#define XOR_FAST_HASH_STORAGE_H
#include "XORFastHash.h"
#include <limits>
#include <vector>
template <typename NodeID, typename Key> class XORFastHashStorage
{
public:
struct HashCell
{
Key key;
NodeID id;
unsigned time;
HashCell()
: key(std::numeric_limits<unsigned>::max()), id(std::numeric_limits<unsigned>::max()),
time(std::numeric_limits<unsigned>::max())
{
}
HashCell(const HashCell &other) : key(other.key), id(other.id), time(other.time) {}
inline operator Key() const { return key; }
inline void operator=(const Key &key_to_insert) { key = key_to_insert; }
};
explicit XORFastHashStorage(size_t) : positions(2 << 16), current_timestamp(0) {}
inline HashCell &operator[](const NodeID node)
{
unsigned short position = fast_hasher(node);
while ((positions[position].time == current_timestamp) && (positions[position].id != node))
{
++position %= (2 << 16);
}
positions[position].id = node;
positions[position].time = current_timestamp;
return positions[position];
}
inline void Clear()
{
++current_timestamp;
if (std::numeric_limits<unsigned>::max() == current_timestamp)
{
positions.clear();
positions.resize((2 << 16));
}
}
private:
XORFastHashStorage() : positions(2 << 16), current_timestamp(0) {}
std::vector<HashCell> positions;
XORFastHash fast_hasher;
unsigned current_timestamp;
};
#endif // XOR_FAST_HASH_STORAGE_H
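The storage above gets its cheap Clear() from the time field: instead of wiping every cell, Clear() bumps current_timestamp, and a cell only counts as live while its stored time equals the current timestamp. A self-contained sketch of that timestamping trick on a small plain table; all names here are illustrative, not the project's API.

#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>

// Illustrative timestamped cell: "clearing" the table is just bumping an epoch counter.
struct TimestampedCell
{
    int value = 0;
    uint32_t time = 0;
};

class TimestampedTable
{
    std::array<TimestampedCell, 16> cells{};
    uint32_t current_timestamp = 1; // all cells start "empty" because their time is 0

  public:
    void Set(std::size_t index, int value)
    {
        cells[index].value = value;
        cells[index].time = current_timestamp;
    }
    bool IsSet(std::size_t index) const { return cells[index].time == current_timestamp; }
    void Clear() { ++current_timestamp; } // O(1) instead of touching every cell
};

int main()
{
    TimestampedTable table;
    table.Set(3, 42);
    std::cout << table.IsSet(3) << '\n'; // 1
    table.Clear();
    std::cout << table.IsSet(3) << '\n'; // 0
    return 0;
}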


@ -0,0 +1,63 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef BASE_DESCRIPTOR_H
#define BASE_DESCRIPTOR_H
#include "../DataStructures/PhantomNodes.h"
#include "../DataStructures/RawRouteData.h"
#include "../typedefs.h"
#include <osrm/Reply.h>
#include <string>
#include <vector>
struct DescriptorConfig
{
DescriptorConfig() : instructions(true), geometry(true), encode_geometry(true), zoom_level(18)
{
}
bool instructions;
bool geometry;
bool encode_geometry;
unsigned short zoom_level;
};
template <class DataFacadeT> class BaseDescriptor
{
public:
BaseDescriptor() {}
// Maybe someone can explain the pure virtual destructor thing to me (dennis)
virtual ~BaseDescriptor() {}
virtual void Run(const RawRouteData &raw_route,
const PhantomNodes &phantom_nodes,
http::Reply &reply) = 0;
virtual void SetConfig(const DescriptorConfig &config) = 0;
};
#endif // BASE_DESCRIPTOR_H


@ -0,0 +1,80 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "DescriptionFactory.h"
DescriptionFactory::DescriptionFactory() : entireLength(0) {}
void DescriptionFactory::SetStartSegment(const PhantomNode &source)
{
start_phantom = source;
AppendSegment(source.location, PathData(0, source.name_id, TurnInstruction::HeadOn, source.forward_weight));
}
void DescriptionFactory::SetEndSegment(const PhantomNode &target)
{
target_phantom = target;
path_description.emplace_back(
target.location, target.name_id, 0, target.reverse_weight, TurnInstruction::NoTurn, true);
}
void DescriptionFactory::AppendSegment(const FixedPointCoordinate &coordinate,
const PathData &path_point)
{
if ((1 == path_description.size()) && (path_description.back().location == coordinate))
{
path_description.back().name_id = path_point.name_id;
return;
}
path_description.emplace_back(coordinate,
path_point.name_id,
path_point.segment_duration,
0,
path_point.turn_instruction);
}
JSON::Value DescriptionFactory::AppendEncodedPolylineString(const bool return_encoded)
{
if (return_encoded)
{
return polyline_compressor.printEncodedString(path_description);
}
return polyline_compressor.printUnencodedString(path_description);
}
JSON::Value DescriptionFactory::AppendUnencodedPolylineString() const
{
return polyline_compressor.printUnencodedString(path_description);
}
void DescriptionFactory::BuildRouteSummary(const double distance, const unsigned time)
{
summary.source_name_id = start_phantom.name_id;
summary.target_name_id = target_phantom.name_id;
summary.BuildDurationAndLengthStrings(distance, time);
}


@ -0,0 +1,205 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef DESCRIPTIONFACTORY_H_
#define DESCRIPTIONFACTORY_H_
#include "../Algorithms/DouglasPeucker.h"
#include "../Algorithms/PolylineCompressor.h"
#include "../DataStructures/PhantomNodes.h"
#include "../DataStructures/RawRouteData.h"
#include "../DataStructures/SegmentInformation.h"
#include "../DataStructures/TurnInstructions.h"
#include "../Util/SimpleLogger.h"
#include "../typedefs.h"
#include <osrm/Coordinate.h>
#include <limits>
#include <vector>
/* This class is fed with all way segments in consecutive order
* and produces the description plus the encoded polyline */
class DescriptionFactory
{
DouglasPeucker polyline_generalizer;
PolylineCompressor polyline_compressor;
PhantomNode start_phantom, target_phantom;
double DegreeToRadian(const double degree) const;
double RadianToDegree(const double degree) const;
public:
struct RouteSummary
{
unsigned distance;
EdgeWeight duration;
unsigned source_name_id;
unsigned target_name_id;
RouteSummary() : distance(0), duration(0), source_name_id(0), target_name_id(0) {}
void BuildDurationAndLengthStrings(const double raw_distance, const unsigned raw_duration)
{
// compute distance/duration for route summary
distance = round(raw_distance);
duration = round(raw_duration / 10.);
}
} summary;
double entireLength;
// I know, declaring this public is considered bad. I'm lazy
std::vector<SegmentInformation> path_description;
DescriptionFactory();
JSON::Value AppendUnencodedPolylineString() const;
void AppendSegment(const FixedPointCoordinate &coordinate, const PathData &data);
void BuildRouteSummary(const double distance, const unsigned time);
void SetStartSegment(const PhantomNode &start_phantom);
void SetEndSegment(const PhantomNode &start_phantom);
JSON::Value AppendEncodedPolylineString(const bool return_encoded);
template <class DataFacadeT> void Run(const DataFacadeT *facade, const unsigned zoomLevel)
{
if (path_description.empty())
{
return;
}
/** starts at index 1 */
path_description[0].length = 0;
for (unsigned i = 1; i < path_description.size(); ++i)
{
// move down names by one, q&d hack
path_description[i - 1].name_id = path_description[i].name_id;
path_description[i].length = FixedPointCoordinate::ApproximateEuclideanDistance(
path_description[i - 1].location, path_description[i].location);
}
/*Simplify turn instructions
Input :
10. Turn left on B 36 for 20 km
11. Continue on B 35; B 36 for 2 km
12. Continue on B 36 for 13 km
becomes:
10. Turn left on B 36 for 35 km
*/
// TODO: rework to check only the start and end of the string.
// stl string is way too expensive
// unsigned lastTurn = 0;
// for(unsigned i = 1; i < path_description.size(); ++i) {
// string1 = sEngine.GetEscapedNameForNameID(path_description[i].name_id);
// if(TurnInstruction::GoStraight == path_description[i].turn_instruction) {
// if(std::string::npos != string0.find(string1+";")
// || std::string::npos != string0.find(";"+string1)
// || std::string::npos != string0.find(string1+" ;")
// || std::string::npos != string0.find("; "+string1)
// ){
// SimpleLogger().Write() << "->next correct: " << string0 << " contains " <<
// string1;
// for(; lastTurn != i; ++lastTurn)
// path_description[lastTurn].name_id = path_description[i].name_id;
// path_description[i].turn_instruction = TurnInstruction::NoTurn;
// } else if(std::string::npos != string1.find(string0+";")
// || std::string::npos != string1.find(";"+string0)
// || std::string::npos != string1.find(string0+" ;")
// || std::string::npos != string1.find("; "+string0)
// ){
// SimpleLogger().Write() << "->prev correct: " << string1 << " contains " <<
// string0;
// path_description[i].name_id = path_description[i-1].name_id;
// path_description[i].turn_instruction = TurnInstruction::NoTurn;
// }
// }
// if (TurnInstruction::NoTurn != path_description[i].turn_instruction) {
// lastTurn = i;
// }
// string0 = string1;
// }
double segment_length = 0.;
unsigned segment_duration = 0;
unsigned segment_start_index = 0;
for (unsigned i = 1; i < path_description.size(); ++i)
{
entireLength += path_description[i].length;
segment_length += path_description[i].length;
segment_duration += path_description[i].duration;
path_description[segment_start_index].length = segment_length;
path_description[segment_start_index].duration = segment_duration;
if (TurnInstruction::NoTurn != path_description[i].turn_instruction)
{
BOOST_ASSERT(path_description[i].necessary);
segment_length = 0;
segment_duration = 0;
segment_start_index = i;
}
}
// Post-processing to remove empty or nearly empty path segments
if (std::numeric_limits<double>::epsilon() > path_description.back().length)
{
if (path_description.size() > 2)
{
path_description.pop_back();
path_description.back().necessary = true;
path_description.back().turn_instruction = TurnInstruction::NoTurn;
target_phantom.name_id = (path_description.end() - 2)->name_id;
}
}
if (std::numeric_limits<double>::epsilon() > path_description.front().length)
{
if (path_description.size() > 2)
{
path_description.erase(path_description.begin());
path_description.front().turn_instruction = TurnInstruction::HeadOn;
path_description.front().necessary = true;
start_phantom.name_id = path_description.front().name_id;
}
}
// Generalize poly line
polyline_generalizer.Run(path_description, zoomLevel);
// fix what still needs fixing: compute the bearing for each necessary segment
for (unsigned i = 0; i < path_description.size() - 1 && path_description.size() >= 2; ++i)
{
if (path_description[i].necessary)
{
const double angle = path_description[i+1].location.GetBearing(path_description[i].location);
path_description[i].bearing = angle * 10;
}
}
return;
}
};
#endif /* DESCRIPTIONFACTORY_H_ */
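The aggregation loop in Run() above folds the per-coordinate lengths and durations into the segment that opens each leg, so every necessary turn instruction ends up carrying the totals accumulated up to the next turn. Here is a self-contained sketch of that folding step over plain structs; the Segment type and the sample numbers are illustrative stand-ins for SegmentInformation.

#include <cstddef>
#include <iostream>
#include <vector>

// Illustrative stand-in for SegmentInformation.
struct Segment
{
    double length;
    unsigned duration;
    bool is_turn; // stand-in for "turn_instruction != TurnInstruction::NoTurn"
};

int main()
{
    std::vector<Segment> path = {{0., 0, true},  {10., 2, false}, {15., 3, false},
                                 {20., 4, true}, {5., 1, false},  {8., 2, true}};
    double segment_length = 0.;
    unsigned segment_duration = 0;
    std::size_t segment_start_index = 0;
    for (std::size_t i = 1; i < path.size(); ++i)
    {
        segment_length += path[i].length;
        segment_duration += path[i].duration;
        // the segment that opened this leg accumulates everything up to the next turn
        path[segment_start_index].length = segment_length;
        path[segment_start_index].duration = segment_duration;
        if (path[i].is_turn)
        {
            segment_length = 0.;
            segment_duration = 0;
            segment_start_index = i;
        }
    }
    for (std::size_t i = 0; i < path.size(); ++i)
    {
        std::cout << "segment " << i << ": length=" << path[i].length
                  << " duration=" << path[i].duration << '\n';
    }
    return 0;
}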

Descriptors/GPXDescriptor.h Normal file

@ -0,0 +1,102 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef GPX_DESCRIPTOR_H
#define GPX_DESCRIPTOR_H
#include "BaseDescriptor.h"
template <class DataFacadeT> class GPXDescriptor : public BaseDescriptor<DataFacadeT>
{
private:
DescriptorConfig config;
FixedPointCoordinate current;
DataFacadeT * facade;
void AddRoutePoint(const FixedPointCoordinate & coordinate, std::vector<char> & output)
{
const std::string route_point_head = "<rtept lat=\"";
const std::string route_point_middle = " lon=\"";
const std::string route_point_tail = "\"></rtept>";
std::string tmp;
FixedPointCoordinate::convertInternalLatLonToString(coordinate.lat, tmp);
output.insert(output.end(), route_point_head.begin(), route_point_head.end());
output.insert(output.end(), tmp.begin(), tmp.end());
output.push_back('\"');
FixedPointCoordinate::convertInternalLatLonToString(coordinate.lon, tmp);
output.insert(output.end(), route_point_middle.begin(), route_point_middle.end());
output.insert(output.end(), tmp.begin(), tmp.end());
output.insert(output.end(), route_point_tail.begin(), route_point_tail.end());
}
public:
GPXDescriptor(DataFacadeT *facade) : facade(facade) {}
void SetConfig(const DescriptorConfig &c) { config = c; }
// TODO: reorder parameters
void Run(const RawRouteData &raw_route,
const PhantomNodes &phantom_node_list,
http::Reply &reply)
{
std::string header("<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
"<gpx creator=\"OSRM Routing Engine\" version=\"1.1\" "
"xmlns=\"http://www.topografix.com/GPX/1/1\" "
"xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" "
"xsi:schemaLocation=\"http://www.topografix.com/GPX/1/1 gpx.xsd"
"\">"
"<metadata><copyright author=\"Project OSRM\"><license>Data (c)"
" OpenStreetMap contributors (ODbL)</license></copyright>"
"</metadata>"
"<rte>");
reply.content.insert(reply.content.end(), header.begin(), header.end());
const bool found_route = (raw_route.shortest_path_length != INVALID_EDGE_WEIGHT) &&
(!raw_route.unpacked_path_segments.front().empty());
if (found_route)
{
AddRoutePoint(phantom_node_list.source_phantom.location, reply.content);
for (const std::vector<PathData> &path_data_vector : raw_route.unpacked_path_segments)
{
for (const PathData &path_data : path_data_vector)
{
const FixedPointCoordinate current_coordinate =
facade->GetCoordinateOfNode(path_data.node);
AddRoutePoint(current_coordinate, reply.content);
}
}
AddRoutePoint(phantom_node_list.target_phantom.location, reply.content);
}
std::string footer("</rte></gpx>");
reply.content.insert(reply.content.end(), footer.begin(), footer.end());
}
};
#endif // GPX_DESCRIPTOR_H
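AddRoutePoint above assembles each GPX <rtept> element by splicing fixed string fragments around the stringified latitude and longitude. A simplified, standalone sketch of the same pattern using plain doubles; the real code converts OSRM's fixed-point coordinates instead of calling std::to_string.

#include <iostream>
#include <string>
#include <vector>

// Simplified stand-in for GPXDescriptor::AddRoutePoint: append one <rtept> element.
void AddRoutePoint(const double lat, const double lon, std::vector<char> &output)
{
    const std::string element = "<rtept lat=\"" + std::to_string(lat) + "\" lon=\"" +
                                std::to_string(lon) + "\"></rtept>";
    output.insert(output.end(), element.begin(), element.end());
}

int main()
{
    std::vector<char> content;
    AddRoutePoint(52.517037, 13.388860, content);
    AddRoutePoint(52.520008, 13.404954, content);
    std::cout << std::string(content.begin(), content.end()) << '\n';
    return 0;
}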


@ -0,0 +1,356 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef JSON_DESCRIPTOR_H_
#define JSON_DESCRIPTOR_H_
#include "BaseDescriptor.h"
#include "DescriptionFactory.h"
#include "../Algorithms/ObjectToBase64.h"
#include "../Algorithms/ExtractRouteNames.h"
#include "../DataStructures/JSONContainer.h"
#include "../DataStructures/SegmentInformation.h"
#include "../DataStructures/TurnInstructions.h"
#include "../Util/Azimuth.h"
#include "../Util/StringUtil.h"
#include "../Util/TimingUtil.h"
#include <algorithm>
template <class DataFacadeT> class JSONDescriptor : public BaseDescriptor<DataFacadeT>
{
private:
DataFacadeT *facade;
DescriptorConfig config;
DescriptionFactory description_factory, alternate_description_factory;
FixedPointCoordinate current;
unsigned entered_restricted_area_count;
struct RoundAbout
{
RoundAbout() : start_index(INT_MAX), name_id(INT_MAX), leave_at_exit(INT_MAX) {}
int start_index;
int name_id;
int leave_at_exit;
} round_about;
struct Segment
{
Segment() : name_id(-1), length(-1), position(-1) {}
Segment(int n, int l, int p) : name_id(n), length(l), position(p) {}
int name_id;
int length;
int position;
};
std::vector<Segment> shortest_path_segments, alternative_path_segments;
std::vector<unsigned> shortest_leg_end_indices, alternative_leg_end_indices;
ExtractRouteNames<DataFacadeT, Segment> GenerateRouteNames;
public:
JSONDescriptor(DataFacadeT *facade) : facade(facade), entered_restricted_area_count(0)
{
shortest_leg_end_indices.emplace_back(0);
alternative_leg_end_indices.emplace_back(0);
}
void SetConfig(const DescriptorConfig &c) { config = c; }
unsigned DescribeLeg(const std::vector<PathData> route_leg, const PhantomNodes &leg_phantoms)
{
unsigned added_element_count = 0;
// Get all the coordinates for the computed route
FixedPointCoordinate current_coordinate;
for (const PathData &path_data : route_leg)
{
current_coordinate = facade->GetCoordinateOfNode(path_data.node);
description_factory.AppendSegment(current_coordinate, path_data);
++added_element_count;
}
++added_element_count;
BOOST_ASSERT((route_leg.size() + 1) == added_element_count);
return added_element_count;
}
void Run(const RawRouteData &raw_route,
const PhantomNodes &phantom_nodes,
http::Reply &reply)
{
JSON::Object json_result;
if (INVALID_EDGE_WEIGHT == raw_route.shortest_path_length)
{
// We do not need to do much, if there is no route ;-)
json_result.values["status"] = 207;
json_result.values["status_message"] = "Cannot find route between points";
JSON::render(reply.content, json_result);
return;
}
// check if first segment is non-zero
std::string road_name =
facade->GetEscapedNameForNameID(phantom_nodes.source_phantom.name_id);
BOOST_ASSERT(raw_route.unpacked_path_segments.size() ==
raw_route.segment_end_coordinates.size());
description_factory.SetStartSegment(phantom_nodes.source_phantom);
json_result.values["status"] = 0;
json_result.values["status_message"] = "Found route between points";
// for each unpacked segment add the leg to the description
for (unsigned i = 0; i < raw_route.unpacked_path_segments.size(); ++i)
{
const int added_segments = DescribeLeg(raw_route.unpacked_path_segments[i],
raw_route.segment_end_coordinates[i]);
BOOST_ASSERT(0 < added_segments);
shortest_leg_end_indices.emplace_back(added_segments + shortest_leg_end_indices.back());
}
description_factory.SetEndSegment(phantom_nodes.target_phantom);
description_factory.Run(facade, config.zoom_level);
if (config.geometry)
{
JSON::Value route_geometry = description_factory.AppendEncodedPolylineString(config.encode_geometry);
json_result.values["route_geometry"] = route_geometry;
}
if (config.instructions)
{
JSON::Array json_route_instructions;
BuildTextualDescription(description_factory,
json_route_instructions,
raw_route.shortest_path_length,
shortest_path_segments);
json_result.values["route_instructions"] = json_route_instructions;
}
description_factory.BuildRouteSummary(description_factory.entireLength,
raw_route.shortest_path_length);
JSON::Object json_route_summary;
json_route_summary.values["total_distance"] = description_factory.summary.distance;
json_route_summary.values["total_time"] = description_factory.summary.duration;
json_route_summary.values["start_point"] = facade->GetEscapedNameForNameID(description_factory.summary.source_name_id);
json_route_summary.values["end_point"] = facade->GetEscapedNameForNameID(description_factory.summary.target_name_id);
json_result.values["route_summary"] = json_route_summary;
BOOST_ASSERT(!raw_route.segment_end_coordinates.empty());
JSON::Array json_via_points_array;
JSON::Array json_first_coordinate;
json_first_coordinate.values.push_back(raw_route.segment_end_coordinates.front().source_phantom.location.lat/COORDINATE_PRECISION);
json_first_coordinate.values.push_back(raw_route.segment_end_coordinates.front().source_phantom.location.lon/COORDINATE_PRECISION);
json_via_points_array.values.push_back(json_first_coordinate);
for (const PhantomNodes &nodes : raw_route.segment_end_coordinates)
{
std::string tmp;
JSON::Array json_coordinate;
json_coordinate.values.push_back(nodes.target_phantom.location.lat/COORDINATE_PRECISION);
json_coordinate.values.push_back(nodes.target_phantom.location.lon/COORDINATE_PRECISION);
json_via_points_array.values.push_back(json_coordinate);
}
json_result.values["via_points"] = json_via_points_array;
JSON::Array json_via_indices_array;
json_via_indices_array.values.insert(json_via_indices_array.values.end(), shortest_leg_end_indices.begin(), shortest_leg_end_indices.end());
json_result.values["via_indices"] = json_via_indices_array;
// only one alternative route is computed at this time, so this is hardcoded
if (INVALID_EDGE_WEIGHT != raw_route.alternative_path_length)
{
json_result.values["found_alternative"] = JSON::True();
alternate_description_factory.SetStartSegment(phantom_nodes.source_phantom);
// Get all the coordinates for the computed route
for (const PathData &path_data : raw_route.unpacked_alternative)
{
current = facade->GetCoordinateOfNode(path_data.node);
alternate_description_factory.AppendSegment(current, path_data);
}
alternate_description_factory.Run(facade, config.zoom_level);
if (config.geometry)
{
JSON::Value alternate_geometry_string = alternate_description_factory.AppendEncodedPolylineString(config.encode_geometry);
JSON::Array json_alternate_geometries_array;
json_alternate_geometries_array.values.push_back(alternate_geometry_string);
json_result.values["alternative_geometries"] = json_alternate_geometries_array;
}
// Generate instructions for each alternative (simulated here)
JSON::Array json_alt_instructions;
JSON::Array json_current_alt_instructions;
if (config.instructions)
{
BuildTextualDescription(alternate_description_factory,
json_current_alt_instructions,
raw_route.alternative_path_length,
alternative_path_segments);
json_alt_instructions.values.push_back(json_current_alt_instructions);
json_result.values["alternative_instructions"] = json_alt_instructions;
}
alternate_description_factory.BuildRouteSummary(
alternate_description_factory.entireLength, raw_route.alternative_path_length);
JSON::Object json_alternate_route_summary;
JSON::Array json_alternate_route_summary_array;
json_alternate_route_summary.values["total_distance"] = alternate_description_factory.summary.distance;
json_alternate_route_summary.values["total_time"] = alternate_description_factory.summary.duration;
json_alternate_route_summary.values["start_point"] = facade->GetEscapedNameForNameID(alternate_description_factory.summary.source_name_id);
json_alternate_route_summary.values["end_point"] = facade->GetEscapedNameForNameID(alternate_description_factory.summary.target_name_id);
json_alternate_route_summary_array.values.push_back(json_alternate_route_summary);
json_result.values["alternative_summaries"] = json_alternate_route_summary_array;
JSON::Array json_altenative_indices_array;
json_altenative_indices_array.values.push_back(0);
json_altenative_indices_array.values.push_back(alternate_description_factory.path_description.size());
json_result.values["alternative_indices"] = json_altenative_indices_array;
} else {
json_result.values["found_alternative"] = JSON::False();
}
// Get Names for both routes
RouteNames route_names = GenerateRouteNames(shortest_path_segments, alternative_path_segments, facade);
JSON::Array json_route_names;
json_route_names.values.push_back(route_names.shortest_path_name_1);
json_route_names.values.push_back(route_names.shortest_path_name_2);
json_result.values["route_name"] = json_route_names;
if (INVALID_EDGE_WEIGHT != raw_route.alternative_path_length)
{
JSON::Array json_alternate_names_array;
JSON::Array json_alternate_names;
json_alternate_names.values.push_back(route_names.alternative_path_name_1);
json_alternate_names.values.push_back(route_names.alternative_path_name_2);
json_alternate_names_array.values.push_back(json_alternate_names);
json_result.values["alternative_names"] = json_alternate_names_array;
}
JSON::Object json_hint_object;
json_hint_object.values["checksum"] = raw_route.check_sum;
JSON::Array json_location_hint_array;
std::string hint;
for (unsigned i = 0; i < raw_route.segment_end_coordinates.size(); ++i)
{
EncodeObjectToBase64(raw_route.segment_end_coordinates[i].source_phantom, hint);
json_location_hint_array.values.push_back(hint);
}
EncodeObjectToBase64(raw_route.segment_end_coordinates.back().target_phantom, hint);
json_location_hint_array.values.push_back(hint);
json_hint_object.values["locations"] = json_location_hint_array;
json_result.values["hint_data"] = json_hint_object;
// render the content to the output array
TIMER_START(route_render);
JSON::render(reply.content, json_result);
TIMER_STOP(route_render);
SimpleLogger().Write(logDEBUG) << "rendering took: " << TIMER_MSEC(route_render);
}
// TODO: reorder parameters
inline void BuildTextualDescription(DescriptionFactory &description_factory,
JSON::Array & json_instruction_array,
const int route_length,
std::vector<Segment> &route_segments_list)
{
// Segment information has the following format:
//["instruction id","streetname",length,position,time,"length","earth_direction",azimuth]
unsigned necessary_segments_running_index = 0;
round_about.leave_at_exit = 0;
round_about.name_id = 0;
std::string temp_dist, temp_length, temp_duration, temp_bearing, temp_instruction;
// Fetch data from Factory and generate a string from it.
for (const SegmentInformation &segment : description_factory.path_description)
{
JSON::Array json_instruction_row;
TurnInstruction current_instruction = segment.turn_instruction;
entered_restricted_area_count += (current_instruction != segment.turn_instruction);
if (TurnInstructionsClass::TurnIsNecessary(current_instruction))
{
if (TurnInstruction::EnterRoundAbout == current_instruction)
{
round_about.name_id = segment.name_id;
round_about.start_index = necessary_segments_running_index;
}
else
{
std::string current_turn_instruction;
if (TurnInstruction::LeaveRoundAbout == current_instruction)
{
temp_instruction = IntToString(as_integer(TurnInstruction::EnterRoundAbout));
current_turn_instruction += temp_instruction;
current_turn_instruction += "-";
temp_instruction = IntToString(round_about.leave_at_exit + 1);
current_turn_instruction += temp_instruction;
round_about.leave_at_exit = 0;
}
else
{
temp_instruction = IntToString(as_integer(current_instruction));
current_turn_instruction += temp_instruction;
}
json_instruction_row.values.push_back(current_turn_instruction);
json_instruction_row.values.push_back(facade->GetEscapedNameForNameID(segment.name_id));
json_instruction_row.values.push_back(std::round(segment.length));
json_instruction_row.values.push_back(necessary_segments_running_index);
json_instruction_row.values.push_back(round(segment.duration / 10));
json_instruction_row.values.push_back(IntToString(segment.length)+"m");
int bearing_value = round(segment.bearing / 10.);
json_instruction_row.values.push_back(Azimuth::Get(bearing_value));
json_instruction_row.values.push_back(bearing_value);
route_segments_list.emplace_back(
segment.name_id, segment.length, route_segments_list.size());
json_instruction_array.values.push_back(json_instruction_row);
}
}
else if (TurnInstruction::StayOnRoundAbout == current_instruction)
{
++round_about.leave_at_exit;
}
if (segment.necessary)
{
++necessary_segments_running_index;
}
}
// TODO: check if this is an invariant
if (INVALID_EDGE_WEIGHT != route_length)
{
JSON::Array json_last_instruction_row;
temp_instruction = IntToString(as_integer(TurnInstruction::ReachedYourDestination));
json_last_instruction_row.values.push_back(temp_instruction);
json_last_instruction_row.values.push_back("");
json_last_instruction_row.values.push_back(0);
json_last_instruction_row.values.push_back(necessary_segments_running_index - 1);
json_last_instruction_row.values.push_back(0);
json_last_instruction_row.values.push_back("0m");
json_last_instruction_row.values.push_back(Azimuth::Get(0.0));
json_last_instruction_row.values.push_back(0.);
json_instruction_array.values.push_back(json_last_instruction_row);
}
}
};
#endif /* JSON_DESCRIPTOR_H_ */

3
Docs/webclient.txt Normal file

@ -0,0 +1,3 @@
The JavaScript-based web client is a separate project available at
https://github.com/DennisSchiefer/Project-OSRM-Web


@ -1,44 +0,0 @@
PROJECT_NAME = "Project OSRM"
PROJECT_BRIEF = "Open Source Routing Machine"
BUILTIN_STL_SUPPORT = YES
EXTRACT_ALL = YES
EXTRACT_PRIVATE = YES
EXTRACT_PACKAGE = YES
EXTRACT_STATIC = YES
EXTRACT_LOCAL_CLASSES = YES
EXTRACT_ANON_NSPACES = YES
QUIET = YES
INPUT = @CMAKE_CURRENT_SOURCE_DIR@
USE_MDFILE_AS_MAINPAGE = @CMAKE_CURRENT_SOURCE_DIR@/README.md
FILE_PATTERNS = *.h *.hpp *.c *.cc *.cpp *.md
RECURSIVE = YES
EXCLUDE = @CMAKE_CURRENT_SOURCE_DIR@/third_party \
@CMAKE_CURRENT_SOURCE_DIR@/build \
@CMAKE_CURRENT_SOURCE_DIR@/node_modules \
@CMAKE_CURRENT_SOURCE_DIR@/unit_tests \
@CMAKE_CURRENT_SOURCE_DIR@/benchmarks \
@CMAKE_CURRENT_SOURCE_DIR@/features
SOURCE_BROWSER = YES
CLANG_ASSISTED_PARSING = NO
HTML_COLORSTYLE_HUE = 217
HTML_COLORSTYLE_SAT = 71
HTML_COLORSTYLE_GAMMA = 50
GENERATE_TREEVIEW = YES
HAVE_DOT = @DOXYGEN_DOT_FOUND@
CALL_GRAPH = YES
CALLER_GRAPH = YES
DOT_IMAGE_FORMAT = svg
INTERACTIVE_SVG = YES
DOT_GRAPH_MAX_NODES = 500
DOT_TRANSPARENT = YES
DOT_MULTI_TARGETS = YES

139
Extractor/BaseParser.cpp Normal file

@ -0,0 +1,139 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "BaseParser.h"
#include "ExtractionWay.h"
#include "ScriptingEnvironment.h"
#include "../DataStructures/ImportNode.h"
#include "../Util/LuaUtil.h"
#include "../Util/OSRMException.h"
#include "../Util/SimpleLogger.h"
#include <boost/algorithm/string.hpp>
#include <boost/algorithm/string/regex.hpp>
#include <boost/ref.hpp>
#include <boost/regex.hpp>
BaseParser::BaseParser(ExtractorCallbacks *extractor_callbacks,
ScriptingEnvironment &scripting_environment)
: extractor_callbacks(extractor_callbacks),
lua_state(scripting_environment.getLuaStateForThreadID(0)),
scripting_environment(scripting_environment), use_turn_restrictions(true)
{
ReadUseRestrictionsSetting();
ReadRestrictionExceptions();
}
void BaseParser::ReadUseRestrictionsSetting()
{
if (0 != luaL_dostring(lua_state, "return use_turn_restrictions\n"))
{
throw OSRMException("ERROR occured in scripting block");
}
if (lua_isboolean(lua_state, -1))
{
use_turn_restrictions = lua_toboolean(lua_state, -1);
}
if (use_turn_restrictions)
{
SimpleLogger().Write() << "Using turn restrictions";
}
else
{
SimpleLogger().Write() << "Ignoring turn restrictions";
}
}
void BaseParser::ReadRestrictionExceptions()
{
if (lua_function_exists(lua_state, "get_exceptions"))
{
// get list of turn restriction exceptions
luabind::call_function<void>(
lua_state, "get_exceptions", boost::ref(restriction_exceptions));
const unsigned exception_count = restriction_exceptions.size();
SimpleLogger().Write() << "Found " << exception_count
<< " exceptions to turn restrictions:";
for (const std::string &str : restriction_exceptions)
{
SimpleLogger().Write() << " " << str;
}
}
else
{
SimpleLogger().Write() << "Found no exceptions to turn restrictions";
}
}
void BaseParser::report_errors(lua_State *lua_state, const int status) const
{
if (0 != status)
{
std::cerr << "-- " << lua_tostring(lua_state, -1) << std::endl;
lua_pop(lua_state, 1); // remove error message
}
}
void BaseParser::ParseNodeInLua(ImportNode &node, lua_State *local_lua_state)
{
luabind::call_function<void>(local_lua_state, "node_function", boost::ref(node));
}
void BaseParser::ParseWayInLua(ExtractionWay &way, lua_State *local_lua_state)
{
luabind::call_function<void>(local_lua_state, "way_function", boost::ref(way));
}
bool BaseParser::ShouldIgnoreRestriction(const std::string &except_tag_string) const
{
// should this restriction be ignored? yes if there's an overlap between:
// a) the list of modes in the except tag of the restriction
// (except_tag_string), e.g. except=bus;bicycle
// b) the hierarchy of modes defined by the lua profile,
// e.g. [access, vehicle, bicycle]
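// Illustrative example (hypothetical values): for except_tag_string = "bus;bicycle"
// and restriction_exceptions = {"bicycle"}, the two lists overlap on "bicycle",
// so the restriction is ignored and this function returns true.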
if (except_tag_string.empty())
{
return false;
}
// Be warned, this is quadratic work here, but we assume that
// only a few exceptions are actually defined.
std::vector<std::string> exceptions;
boost::algorithm::split_regex(exceptions, except_tag_string, boost::regex("[;][ ]*"));
for (std::string &current_string : exceptions)
{
const auto string_iterator =
std::find(restriction_exceptions.begin(), restriction_exceptions.end(), current_string);
if (restriction_exceptions.end() != string_iterator)
{
return true;
}
}
return false;
}

67
Extractor/BaseParser.h Normal file

@ -0,0 +1,67 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef BASEPARSER_H_
#define BASEPARSER_H_
#include <string>
#include <vector>
struct lua_State;
class ExtractorCallbacks;
class ScriptingEnvironment;
struct ExtractionWay;
struct ImportNode;
class BaseParser
{
public:
BaseParser() = delete;
BaseParser(const BaseParser &) = delete;
BaseParser(ExtractorCallbacks *extractor_callbacks,
ScriptingEnvironment &scripting_environment);
virtual ~BaseParser() {}
virtual bool ReadHeader() = 0;
virtual bool Parse() = 0;
virtual void ParseNodeInLua(ImportNode &node, lua_State *lua_state);
virtual void ParseWayInLua(ExtractionWay &way, lua_State *lua_state);
virtual void report_errors(lua_State *lua_state, const int status) const;
protected:
virtual void ReadUseRestrictionsSetting();
virtual void ReadRestrictionExceptions();
virtual bool ShouldIgnoreRestriction(const std::string &except_tag_string) const;
ExtractorCallbacks *extractor_callbacks;
lua_State *lua_state;
ScriptingEnvironment &scripting_environment;
std::vector<std::string> restriction_exceptions;
bool use_turn_restrictions;
};
#endif /* BASEPARSER_H_ */


@ -0,0 +1,451 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "ExtractionContainers.h"
#include "ExtractionWay.h"
#include "../Util/OSRMException.h"
#include "../Util/SimpleLogger.h"
#include <boost/assert.hpp>
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
#include <stxxl/sort>
#include <chrono>
#include <limits>
ExtractionContainers::ExtractionContainers()
{
// Check if stxxl can be instantiated
stxxl::vector<unsigned> dummy_vector;
name_list.push_back("");
}
ExtractionContainers::~ExtractionContainers()
{
used_node_id_list.clear();
all_nodes_list.clear();
all_edges_list.clear();
name_list.clear();
restrictions_list.clear();
way_start_end_id_list.clear();
}
void ExtractionContainers::PrepareData(const std::string &output_file_name,
const std::string &restrictions_file_name)
{
try
{
unsigned number_of_used_nodes = 0;
unsigned number_of_used_edges = 0;
std::chrono::time_point<std::chrono::steady_clock> time1 = std::chrono::steady_clock::now();
std::cout << "[extractor] Sorting used nodes ... " << std::flush;
stxxl::sort(used_node_id_list.begin(), used_node_id_list.end(), Cmp(), stxxl_memory);
std::chrono::time_point<std::chrono::steady_clock> time2 = std::chrono::steady_clock::now();
std::chrono::duration<double> elapsed_seconds = time2 - time1;
std::cout << "ok, after " << elapsed_seconds.count() << "s" << std::endl;
time1 = std::chrono::steady_clock::now();
std::cout << "[extractor] Erasing duplicate nodes ... " << std::flush;
auto new_end = std::unique(used_node_id_list.begin(), used_node_id_list.end());
used_node_id_list.resize(new_end - used_node_id_list.begin());
time2 = std::chrono::steady_clock::now();
elapsed_seconds = time2 - time1;
std::cout << "ok, after " << elapsed_seconds.count() << "s" << std::endl;
time1 = std::chrono::steady_clock::now();
std::cout << "[extractor] Sorting all nodes ... " << std::flush;
stxxl::sort(all_nodes_list.begin(), all_nodes_list.end(), CmpNodeByID(), stxxl_memory);
time2 = std::chrono::steady_clock::now();
elapsed_seconds = time2 - time1;
std::cout << "ok, after " << elapsed_seconds.count() << "s" << std::endl;
time1 = std::chrono::steady_clock::now();
std::cout << "[extractor] Sorting used ways ... " << std::flush;
stxxl::sort(
way_start_end_id_list.begin(), way_start_end_id_list.end(), CmpWayByID(), stxxl_memory);
time2 = std::chrono::steady_clock::now();
elapsed_seconds = time2 - time1;
std::cout << "ok, after " << elapsed_seconds.count() << "s" << std::endl;
std::cout << "[extractor] Sorting restrictions. by from... " << std::flush;
stxxl::sort(restrictions_list.begin(),
restrictions_list.end(),
CmpRestrictionContainerByFrom(),
stxxl_memory);
time2 = std::chrono::steady_clock::now();
elapsed_seconds = time2 - time1;
std::cout << "ok, after " << elapsed_seconds.count() << "s" << std::endl;
std::cout << "[extractor] Fixing restriction starts ... " << std::flush;
auto restrictions_iterator = restrictions_list.begin();
auto way_start_and_end_iterator = way_start_end_id_list.begin();
while (way_start_and_end_iterator != way_start_end_id_list.end() &&
restrictions_iterator != restrictions_list.end())
{
if (way_start_and_end_iterator->wayID < restrictions_iterator->fromWay)
{
++way_start_and_end_iterator;
continue;
}
if (way_start_and_end_iterator->wayID > restrictions_iterator->fromWay)
{
++restrictions_iterator;
continue;
}
BOOST_ASSERT(way_start_and_end_iterator->wayID == restrictions_iterator->fromWay);
const NodeID via_node_id = restrictions_iterator->restriction.viaNode;
if (way_start_and_end_iterator->firstStart == via_node_id)
{
restrictions_iterator->restriction.fromNode =
way_start_and_end_iterator->firstTarget;
}
else if (way_start_and_end_iterator->firstTarget == via_node_id)
{
restrictions_iterator->restriction.fromNode =
way_start_and_end_iterator->firstStart;
}
else if (way_start_and_end_iterator->lastStart == via_node_id)
{
restrictions_iterator->restriction.fromNode =
way_start_and_end_iterator->lastTarget;
}
else if (way_start_and_end_iterator->lastTarget == via_node_id)
{
restrictions_iterator->restriction.fromNode = way_start_and_end_iterator->lastStart;
}
++restrictions_iterator;
}
time2 = std::chrono::steady_clock::now();
elapsed_seconds = time2 - time1;
std::cout << "ok, after " << elapsed_seconds.count() << "s" << std::endl;
time1 = std::chrono::steady_clock::now();
std::cout << "[extractor] Sorting restrictions. by to ... " << std::flush;
stxxl::sort(restrictions_list.begin(),
restrictions_list.end(),
CmpRestrictionContainerByTo(),
stxxl_memory);
time2 = std::chrono::steady_clock::now();
elapsed_seconds = time2 - time1;
std::cout << "ok, after " << elapsed_seconds.count() << "s" << std::endl;
time1 = std::chrono::steady_clock::now();
unsigned number_of_useable_restrictions = 0;
std::cout << "[extractor] Fixing restriction ends ... " << std::flush;
restrictions_iterator = restrictions_list.begin();
way_start_and_end_iterator = way_start_end_id_list.begin();
while (way_start_and_end_iterator != way_start_end_id_list.end() &&
restrictions_iterator != restrictions_list.end())
{
if (way_start_and_end_iterator->wayID < restrictions_iterator->toWay)
{
++way_start_and_end_iterator;
continue;
}
if (way_start_and_end_iterator->wayID > restrictions_iterator->toWay)
{
++restrictions_iterator;
continue;
}
NodeID via_node_id = restrictions_iterator->restriction.viaNode;
if (way_start_and_end_iterator->lastStart == via_node_id)
{
restrictions_iterator->restriction.toNode = way_start_and_end_iterator->lastTarget;
}
else if (way_start_and_end_iterator->lastTarget == via_node_id)
{
restrictions_iterator->restriction.toNode = way_start_and_end_iterator->lastStart;
}
else if (way_start_and_end_iterator->firstStart == via_node_id)
{
restrictions_iterator->restriction.toNode = way_start_and_end_iterator->firstTarget;
}
else if (way_start_and_end_iterator->firstTarget == via_node_id)
{
restrictions_iterator->restriction.toNode = way_start_and_end_iterator->firstStart;
}
if (std::numeric_limits<unsigned>::max() != restrictions_iterator->restriction.fromNode &&
std::numeric_limits<unsigned>::max() != restrictions_iterator->restriction.toNode)
{
++number_of_useable_restrictions;
}
++restrictions_iterator;
}
time2 = std::chrono::steady_clock::now();
elapsed_seconds = time2 - time1;
std::cout << "ok, after " << elapsed_seconds.count() << "s" << std::endl;
SimpleLogger().Write() << "usable restrictions: " << number_of_useable_restrictions;
// serialize restrictions
std::ofstream restrictions_out_stream;
restrictions_out_stream.open(restrictions_file_name.c_str(), std::ios::binary);
restrictions_out_stream.write((char *)&uuid, sizeof(UUID));
restrictions_out_stream.write((char *)&number_of_useable_restrictions, sizeof(unsigned));
// for (restrictions_iterator = restrictions_list.begin();
// restrictions_iterator != restrictions_list.end();
// ++restrictions_iterator)
for(const auto & restriction_container : restrictions_list)
{
if (std::numeric_limits<unsigned>::max() != restriction_container.restriction.fromNode &&
std::numeric_limits<unsigned>::max() != restriction_container.restriction.toNode)
{
restrictions_out_stream.write((char *)&(restriction_container.restriction),
sizeof(TurnRestriction));
}
}
restrictions_out_stream.close();
std::ofstream file_out_stream;
file_out_stream.open(output_file_name.c_str(), std::ios::binary);
file_out_stream.write((char *)&uuid, sizeof(UUID));
file_out_stream.write((char *)&number_of_used_nodes, sizeof(unsigned));
time1 = std::chrono::steady_clock::now();
std::cout << "[extractor] Confirming/Writing used nodes ... " << std::flush;
// identify all used nodes by a merging step of two sorted lists
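// Both lists are sorted by node id, so a single linear pass suffices: advance
// whichever iterator currently points at the smaller id, and write out a node
// whenever the ids on both sides match.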
auto node_iterator = all_nodes_list.begin();
auto node_id_iterator = used_node_id_list.begin();
while (node_id_iterator != used_node_id_list.end() && node_iterator != all_nodes_list.end())
{
if (*node_id_iterator < node_iterator->id)
{
++node_id_iterator;
continue;
}
if (*node_id_iterator > node_iterator->id)
{
++node_iterator;
continue;
}
BOOST_ASSERT(*node_id_iterator == node_iterator->id);
file_out_stream.write((char *)&(*node_iterator), sizeof(ExternalMemoryNode));
++number_of_used_nodes;
++node_id_iterator;
++node_iterator;
}
time2 = std::chrono::steady_clock::now();
elapsed_seconds = time2 - time1;
std::cout << "ok, after " << elapsed_seconds.count() << "s" << std::endl;
std::cout << "[extractor] setting number of nodes ... " << std::flush;
std::ios::pos_type previous_file_position = file_out_stream.tellp();
file_out_stream.seekp(std::ios::beg + sizeof(UUID));
file_out_stream.write((char *)&number_of_used_nodes, sizeof(unsigned));
file_out_stream.seekp(previous_file_position);
std::cout << "ok" << std::endl;
time1 = std::chrono::steady_clock::now();
// Sort edges by start.
std::cout << "[extractor] Sorting edges by start ... " << std::flush;
stxxl::sort(all_edges_list.begin(), all_edges_list.end(), CmpEdgeByStartID(), stxxl_memory);
time2 = std::chrono::steady_clock::now();
elapsed_seconds = time2 - time1;
std::cout << "ok, after " << elapsed_seconds.count() << "s" << std::endl;
time1 = std::chrono::steady_clock::now();
std::cout << "[extractor] Setting start coords ... " << std::flush;
file_out_stream.write((char *)&number_of_used_edges, sizeof(unsigned));
// Traverse list of edges and nodes in parallel and set start coord
node_iterator = all_nodes_list.begin();
auto edge_iterator = all_edges_list.begin();
while (edge_iterator != all_edges_list.end() && node_iterator != all_nodes_list.end())
{
if (edge_iterator->start < node_iterator->id)
{
++edge_iterator;
continue;
}
if (edge_iterator->start > node_iterator->id)
{
node_iterator++;
continue;
}
BOOST_ASSERT(edge_iterator->start == node_iterator->id);
edge_iterator->source_coordinate.lat = node_iterator->lat;
edge_iterator->source_coordinate.lon = node_iterator->lon;
++edge_iterator;
}
time2 = std::chrono::steady_clock::now();
elapsed_seconds = time2 - time1;
std::cout << "ok, after " << elapsed_seconds.count() << "s" << std::endl;
time1 = std::chrono::steady_clock::now();
// Sort Edges by target
std::cout << "[extractor] Sorting edges by target ... " << std::flush;
stxxl::sort(all_edges_list.begin(), all_edges_list.end(), CmpEdgeByTargetID(), stxxl_memory);
time2 = std::chrono::steady_clock::now();
elapsed_seconds = time2 - time1;
std::cout << "ok, after " << elapsed_seconds.count() << "s" << std::endl;
time1 = std::chrono::steady_clock::now();
std::cout << "[extractor] Setting target coords ... " << std::flush;
// Traverse list of edges and nodes in parallel and set target coord
node_iterator = all_nodes_list.begin();
edge_iterator = all_edges_list.begin();
while (edge_iterator != all_edges_list.end() && node_iterator != all_nodes_list.end())
{
if (edge_iterator->target < node_iterator->id)
{
++edge_iterator;
continue;
}
if (edge_iterator->target > node_iterator->id)
{
++node_iterator;
continue;
}
BOOST_ASSERT(edge_iterator->target == node_iterator->id);
if (edge_iterator->source_coordinate.lat != std::numeric_limits<int>::min() &&
edge_iterator->source_coordinate.lon != std::numeric_limits<int>::min())
{
BOOST_ASSERT(edge_iterator->speed != -1);
BOOST_ASSERT(edge_iterator->type >= 0);
edge_iterator->target_coordinate.lat = node_iterator->lat;
edge_iterator->target_coordinate.lon = node_iterator->lon;
const double distance = FixedPointCoordinate::ApproximateEuclideanDistance(
edge_iterator->source_coordinate.lat,
edge_iterator->source_coordinate.lon,
node_iterator->lat,
node_iterator->lon);
const double weight = (distance * 10.) / (edge_iterator->speed / 3.6);
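// Assuming speed is in km/h and distance in meters (as returned by
// ApproximateEuclideanDistance), speed / 3.6 is m/s and the factor of 10 yields
// a travel time in tenths of a second, matching the deci-second durations used
// elsewhere (e.g. parseDuration).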
int integer_weight = std::max(
1,
(int)std::floor(
(edge_iterator->is_duration_set ? edge_iterator->speed : weight) + .5));
int integer_distance = std::max(1, (int)distance);
short zero = 0;
short one = 1;
file_out_stream.write((char *)&edge_iterator->start, sizeof(unsigned));
file_out_stream.write((char *)&edge_iterator->target, sizeof(unsigned));
file_out_stream.write((char *)&integer_distance, sizeof(int));
switch (edge_iterator->direction)
{
case ExtractionWay::notSure:
file_out_stream.write((char *)&zero, sizeof(short));
break;
case ExtractionWay::oneway:
file_out_stream.write((char *)&one, sizeof(short));
break;
case ExtractionWay::bidirectional:
file_out_stream.write((char *)&zero, sizeof(short));
break;
case ExtractionWay::opposite:
file_out_stream.write((char *)&one, sizeof(short));
break;
default:
throw OSRMException("edge has broken direction");
}
file_out_stream.write((char *)&integer_weight, sizeof(int));
file_out_stream.write((char *)&edge_iterator->type, sizeof(short));
file_out_stream.write((char *)&edge_iterator->name_id, sizeof(unsigned));
file_out_stream.write((char *)&edge_iterator->is_roundabout, sizeof(bool));
file_out_stream.write((char *)&edge_iterator->is_in_tiny_cc, sizeof(bool));
file_out_stream.write((char *)&edge_iterator->is_access_restricted, sizeof(bool));
file_out_stream.write((char *)&edge_iterator->is_contra_flow, sizeof(bool));
file_out_stream.write((char *)&edge_iterator->is_split, sizeof(bool));
++number_of_used_edges;
}
++edge_iterator;
}
time2 = std::chrono::steady_clock::now();
elapsed_seconds = time2 - time1;
std::cout << "ok, after " << elapsed_seconds.count() << "s" << std::endl;
std::cout << "[extractor] setting number of edges ... " << std::flush;
file_out_stream.seekp(previous_file_position);
file_out_stream.write((char *)&number_of_used_edges, sizeof(unsigned));
file_out_stream.close();
std::cout << "ok" << std::endl;
time1 = std::chrono::steady_clock::now();
std::cout << "[extractor] writing street name index ... " << std::flush;
std::string name_file_streamName = (output_file_name + ".names");
boost::filesystem::ofstream name_file_stream(name_file_streamName, std::ios::binary);
// write number of names
const unsigned number_of_names = name_list.size() + 1;
name_file_stream.write((char *)&(number_of_names), sizeof(unsigned));
// compute total number of chars
unsigned total_number_of_chars = 0;
for (const std::string &temp_string : name_list)
{
total_number_of_chars += temp_string.length();
}
// write total number of chars
name_file_stream.write((char *)&(total_number_of_chars), sizeof(unsigned));
// write prefix sums
unsigned name_lengths_prefix_sum = 0;
for (const std::string &temp_string : name_list)
{
name_file_stream.write((char *)&(name_lengths_prefix_sum), sizeof(unsigned));
name_lengths_prefix_sum += temp_string.length();
}
// duplicate on purpose!
name_file_stream.write((char *)&(name_lengths_prefix_sum), sizeof(unsigned));
// write all chars consecutively
for (const std::string &temp_string : name_list)
{
const unsigned string_length = temp_string.length();
name_file_stream.write(temp_string.c_str(), string_length);
}
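// Resulting .names file layout (a sketch inferred from the writes above):
//   number_of_names (unsigned)
//   total_number_of_chars (unsigned)
//   one unsigned length prefix sum per name, followed by the duplicated total
//   all name characters back to back, without separators or terminators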
name_file_stream.close();
time2 = std::chrono::steady_clock::now();
elapsed_seconds = time2 - time1;
std::cout << "ok, after " << elapsed_seconds.count() << "s" << std::endl;
SimpleLogger().Write() << "Processed " << number_of_used_nodes << " nodes and "
<< number_of_used_edges << " edges";
}
catch (const std::exception &e) { std::cerr << "Caught Exception: " << e.what() << std::endl; }
}


@ -0,0 +1,65 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef EXTRACTIONCONTAINERS_H_
#define EXTRACTIONCONTAINERS_H_
#include "InternalExtractorEdge.h"
#include "ExtractorStructs.h"
#include "../DataStructures/Restriction.h"
#include "../Util/UUID.h"
#include <stxxl/vector>
class ExtractionContainers
{
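// In-memory budget handed to the stxxl::sort calls above: roughly 2 GiB on
// 32-bit builds and roughly 4 GiB on 64-bit builds (value is in bytes).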
constexpr static unsigned stxxl_memory = ((sizeof(std::size_t) == 4) ? std::numeric_limits<int>::max() : std::numeric_limits<unsigned>::max());
public:
typedef stxxl::vector<NodeID> STXXLNodeIDVector;
typedef stxxl::vector<ExternalMemoryNode> STXXLNodeVector;
typedef stxxl::vector<InternalExtractorEdge> STXXLEdgeVector;
typedef stxxl::vector<std::string> STXXLStringVector;
typedef stxxl::vector<InputRestrictionContainer> STXXLRestrictionsVector;
typedef stxxl::vector<WayIDStartAndEndEdge> STXXLWayIDStartEndVector;
STXXLNodeIDVector used_node_id_list;
STXXLNodeVector all_nodes_list;
STXXLEdgeVector all_edges_list;
STXXLStringVector name_list;
STXXLRestrictionsVector restrictions_list;
STXXLWayIDStartEndVector way_start_end_id_list;
const UUID uuid;
ExtractionContainers();
virtual ~ExtractionContainers();
void PrepareData(const std::string &output_file_name,
const std::string &restrictions_file_name);
};
#endif /* EXTRACTIONCONTAINERS_H_ */


@ -0,0 +1,89 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef EXTRACTION_HELPER_FUNCTIONS_H
#define EXTRACTION_HELPER_FUNCTIONS_H
#include "../Util/StringUtil.h"
#include <boost/algorithm/string.hpp>
#include <boost/algorithm/string_regex.hpp>
#include <boost/regex.hpp>
#include <limits>
namespace qi = boost::spirit::qi;
// TODO: Move into LUA
inline bool durationIsValid(const std::string &s)
{
boost::regex e(
"((\\d|\\d\\d):(\\d|\\d\\d):(\\d|\\d\\d))|((\\d|\\d\\d):(\\d|\\d\\d))|(\\d|\\d\\d)",
boost::regex_constants::icase | boost::regex_constants::perl);
std::vector<std::string> result;
boost::algorithm::split_regex(result, s, boost::regex(":"));
bool matched = regex_match(s, e);
return matched;
}
inline unsigned parseDuration(const std::string &s)
{
unsigned hours = 0;
unsigned minutes = 0;
unsigned seconds = 0;
boost::regex e(
"((\\d|\\d\\d):(\\d|\\d\\d):(\\d|\\d\\d))|((\\d|\\d\\d):(\\d|\\d\\d))|(\\d|\\d\\d)",
boost::regex_constants::icase | boost::regex_constants::perl);
std::vector<std::string> result;
boost::algorithm::split_regex(result, s, boost::regex(":"));
bool matched = regex_match(s, e);
if (matched)
{
if (1 == result.size())
{
minutes = stringToInt(result[0]);
}
if (2 == result.size())
{
minutes = stringToInt(result[1]);
hours = stringToInt(result[0]);
}
if (3 == result.size())
{
seconds = stringToInt(result[2]);
minutes = stringToInt(result[1]);
hours = stringToInt(result[0]);
}
return 10 * (3600 * hours + 60 * minutes + seconds);
}
return std::numeric_limits<unsigned>::max();
}
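// Illustrative examples for parseDuration above (hypothetical inputs):
//   parseDuration("02:30")   -> 10 * (2 * 3600 + 30 * 60) = 90000 (deci-seconds)
//   parseDuration("45")      -> 10 * (45 * 60)            = 27000
//   parseDuration("garbage") -> std::numeric_limits<unsigned>::max()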
#endif // EXTRACTION_HELPER_FUNCTIONS_H_

79
Extractor/ExtractionWay.h Normal file

@ -0,0 +1,79 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef EXTRACTION_WAY_H
#define EXTRACTION_WAY_H
#include "../DataStructures/HashTable.h"
#include "../typedefs.h"
#include <string>
#include <vector>
struct ExtractionWay
{
ExtractionWay() { Clear(); }
inline void Clear()
{
id = SPECIAL_NODEID;
nameID = INVALID_NAMEID;
path.clear();
keyVals.clear();
direction = ExtractionWay::notSure;
speed = -1;
backward_speed = -1;
duration = -1;
type = -1;
access = true;
roundabout = false;
isAccessRestricted = false;
ignoreInGrid = false;
}
enum Directions
{ notSure = 0,
oneway,
bidirectional,
opposite };
unsigned id;
unsigned nameID;
double speed;
double backward_speed;
double duration;
Directions direction;
std::string name;
short type;
bool access;
bool roundabout;
bool isAccessRestricted;
bool ignoreInGrid;
std::vector<NodeID> path;
HashTable<std::string, std::string> keyVals;
};
#endif // EXTRACTION_WAY_H


@ -0,0 +1,166 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "ExtractorCallbacks.h"
#include "ExtractionContainers.h"
#include "ExtractionWay.h"
#include "../DataStructures/Restriction.h"
#include "../DataStructures/ImportNode.h"
#include "../Util/SimpleLogger.h"
#include <osrm/Coordinate.h>
#include <limits>
#include <string>
#include <vector>
ExtractorCallbacks::ExtractorCallbacks(ExtractionContainers &extraction_containers,
std::unordered_map<std::string, NodeID> &string_map)
: string_map(string_map), external_memory(extraction_containers)
{
}
/** warning: caller needs to take care of synchronization! */
void ExtractorCallbacks::ProcessNode(const ExternalMemoryNode &n)
{
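// Only nodes within the +/-85 degree latitude band (scaled by COORDINATE_PRECISION)
// are kept; nodes closer to the poles are silently discarded.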
if (n.lat <= 85 * COORDINATE_PRECISION && n.lat >= -85 * COORDINATE_PRECISION)
{
external_memory.all_nodes_list.push_back(n);
}
}
bool ExtractorCallbacks::ProcessRestriction(const InputRestrictionContainer &restriction)
{
external_memory.restrictions_list.push_back(restriction);
return true;
}
/** warning: caller needs to take care of synchronization! */
void ExtractorCallbacks::ProcessWay(ExtractionWay &parsed_way)
{
if ((0 >= parsed_way.speed) && (0 >= parsed_way.duration))
{ // only true if the way was not given a positive speed or duration by the profile
return;
}
if (std::numeric_limits<unsigned>::max() == parsed_way.id)
{
SimpleLogger().Write(logDEBUG) << "found bogus way with id: " << parsed_way.id
<< " of size " << parsed_way.path.size();
return;
}
if (0 < parsed_way.duration)
{
// TODO: iterate all way segments and set duration corresponding to the length of each
// segment
parsed_way.speed = parsed_way.duration / (parsed_way.path.size() - 1);
}
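// Note: this spreads the total way duration uniformly over its path.size() - 1
// segments; weighting each segment by its actual length is left to the TODO above.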
if (std::numeric_limits<double>::epsilon() >= std::abs(-1. - parsed_way.speed))
{
SimpleLogger().Write(logDEBUG) << "found way with bogus speed, id: " << parsed_way.id;
return;
}
// Get the unique identifier for the street name
const auto &string_map_iterator = string_map.find(parsed_way.name);
if (string_map.end() == string_map_iterator)
{
parsed_way.nameID = external_memory.name_list.size();
external_memory.name_list.push_back(parsed_way.name);
string_map.insert(std::make_pair(parsed_way.name, parsed_way.nameID));
}
else
{
parsed_way.nameID = string_map_iterator->second;
}
if (ExtractionWay::opposite == parsed_way.direction)
{
std::reverse(parsed_way.path.begin(), parsed_way.path.end());
parsed_way.direction = ExtractionWay::oneway;
}
const bool split_bidirectional_edge =
(parsed_way.backward_speed > 0) && (parsed_way.speed != parsed_way.backward_speed);
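// A way is split into two sets of oneway edges only when a distinct backward
// speed is given; otherwise a single (bi)directional edge per segment suffices.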
for (unsigned n = 0; n < parsed_way.path.size() - 1; ++n)
{
external_memory.all_edges_list.push_back(InternalExtractorEdge(
parsed_way.path[n],
parsed_way.path[n + 1],
parsed_way.type,
(split_bidirectional_edge ? ExtractionWay::oneway : parsed_way.direction),
parsed_way.speed,
parsed_way.nameID,
parsed_way.roundabout,
parsed_way.ignoreInGrid,
(0 < parsed_way.duration),
parsed_way.isAccessRestricted,
false,
split_bidirectional_edge));
external_memory.used_node_id_list.push_back(parsed_way.path[n]);
}
external_memory.used_node_id_list.push_back(parsed_way.path.back());
// The following information is needed to identify start and end segments of restrictions
external_memory.way_start_end_id_list.push_back(
WayIDStartAndEndEdge(parsed_way.id,
parsed_way.path[0],
parsed_way.path[1],
parsed_way.path[parsed_way.path.size() - 2],
parsed_way.path.back()));
if (split_bidirectional_edge)
{ // Only true if the way should be split
std::reverse(parsed_way.path.begin(), parsed_way.path.end());
for (std::vector<NodeID>::size_type n = 0; n < parsed_way.path.size() - 1; ++n)
{
external_memory.all_edges_list.push_back(
InternalExtractorEdge(parsed_way.path[n],
parsed_way.path[n + 1],
parsed_way.type,
ExtractionWay::oneway,
parsed_way.backward_speed,
parsed_way.nameID,
parsed_way.roundabout,
parsed_way.ignoreInGrid,
(0 < parsed_way.duration),
parsed_way.isAccessRestricted,
(ExtractionWay::oneway == parsed_way.direction),
split_bidirectional_edge));
}
external_memory.way_start_end_id_list.push_back(
WayIDStartAndEndEdge(parsed_way.id,
parsed_way.path[0],
parsed_way.path[1],
parsed_way.path[parsed_way.path.size() - 2],
parsed_way.path.back()));
}
}


@ -1,6 +1,6 @@
/*
Copyright (c) 2017, Project OSRM contributors
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
@ -25,39 +25,39 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef STORAGE_HPP
#define STORAGE_HPP
#ifndef EXTRACTOR_CALLBACKS_H
#define EXTRACTOR_CALLBACKS_H
#include "storage/shared_data_index.hpp"
#include "storage/shared_datatype.hpp"
#include "storage/storage_config.hpp"
#include "../typedefs.h"
#include <filesystem>
#include <unordered_map>
#include <string>
#include <vector>
namespace osrm::storage
struct ExternalMemoryNode;
class ExtractionContainers;
struct ExtractionWay;
struct InputRestrictionContainer;
class ExtractorCallbacks
{
void populateLayoutFromFile(const std::filesystem::path &path, storage::BaseDataLayout &layout);
class Storage
{
public:
Storage(StorageConfig config);
int Run(int max_wait, const std::string &name, bool only_metric);
void PopulateStaticData(const SharedDataIndex &index);
void PopulateUpdatableData(const SharedDataIndex &index);
void PopulateLayout(storage::BaseDataLayout &layout,
const std::vector<std::pair<bool, std::filesystem::path>> &files);
std::string PopulateLayoutWithRTree(storage::BaseDataLayout &layout);
std::vector<std::pair<bool, std::filesystem::path>> GetUpdatableFiles();
std::vector<std::pair<bool, std::filesystem::path>> GetStaticFiles();
private:
StorageConfig config;
};
} // namespace osrm::storage
std::unordered_map<std::string, NodeID> &string_map;
ExtractionContainers &external_memory;
#endif
public:
ExtractorCallbacks() = delete;
ExtractorCallbacks(const ExtractorCallbacks &) = delete;
explicit ExtractorCallbacks(ExtractionContainers &extraction_containers,
std::unordered_map<std::string, NodeID> &string_map);
// warning: caller needs to take care of synchronization!
void ProcessNode(const ExternalMemoryNode &node);
// warning: caller needs to take care of synchronization!
bool ProcessRestriction(const InputRestrictionContainer &restriction);
// warning: caller needs to take care of synchronization!
void ProcessWay(ExtractionWay &way);
};
#endif /* EXTRACTOR_CALLBACKS_H */


@ -0,0 +1,114 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef EXTRACTORSTRUCTS_H_
#define EXTRACTORSTRUCTS_H_
#include "../DataStructures/HashTable.h"
#include "../DataStructures/ImportNode.h"
#include "../typedefs.h"
#include <limits>
#include <string>
struct ExtractorRelation
{
ExtractorRelation() : type(unknown) {}
enum
{ unknown = 0,
ferry,
turnRestriction } type;
HashTable<std::string, std::string> keyVals;
};
struct WayIDStartAndEndEdge
{
unsigned wayID;
NodeID firstStart;
NodeID firstTarget;
NodeID lastStart;
NodeID lastTarget;
WayIDStartAndEndEdge()
: wayID(std::numeric_limits<unsigned>::max()),
firstStart(std::numeric_limits<unsigned>::max()),
firstTarget(std::numeric_limits<unsigned>::max()),
lastStart(std::numeric_limits<unsigned>::max()),
lastTarget(std::numeric_limits<unsigned>::max())
{
}
explicit WayIDStartAndEndEdge(unsigned w, NodeID fs, NodeID ft, NodeID ls, NodeID lt)
: wayID(w), firstStart(fs), firstTarget(ft), lastStart(ls), lastTarget(lt)
{
}
static WayIDStartAndEndEdge min_value()
{
return WayIDStartAndEndEdge((std::numeric_limits<unsigned>::min)(),
(std::numeric_limits<unsigned>::min)(),
(std::numeric_limits<unsigned>::min)(),
(std::numeric_limits<unsigned>::min)(),
(std::numeric_limits<unsigned>::min)());
}
static WayIDStartAndEndEdge max_value()
{
return WayIDStartAndEndEdge((std::numeric_limits<unsigned>::max)(),
(std::numeric_limits<unsigned>::max)(),
(std::numeric_limits<unsigned>::max)(),
(std::numeric_limits<unsigned>::max)(),
(std::numeric_limits<unsigned>::max)());
}
};
struct CmpWayByID
{
typedef WayIDStartAndEndEdge value_type;
bool operator()(const WayIDStartAndEndEdge &a, const WayIDStartAndEndEdge &b) const
{
return a.wayID < b.wayID;
}
value_type max_value() { return WayIDStartAndEndEdge::max_value(); }
value_type min_value() { return WayIDStartAndEndEdge::min_value(); }
};
struct Cmp
{
typedef NodeID value_type;
bool operator()(const NodeID a, const NodeID b) const { return a < b; }
value_type max_value() { return 0xffffffff; }
value_type min_value() { return 0x0; }
};
struct CmpNodeByID
{
typedef ExternalMemoryNode value_type;
bool operator()(const ExternalMemoryNode &a, const ExternalMemoryNode &b) const
{
return a.id < b.id;
}
value_type max_value() { return ExternalMemoryNode::max_value(); }
value_type min_value() { return ExternalMemoryNode::min_value(); }
};
#endif /* EXTRACTORSTRUCTS_H_ */


@ -0,0 +1,120 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef INTERNAL_EXTRACTOR_EDGE_H
#define INTERNAL_EXTRACTOR_EDGE_H
#include "../typedefs.h"
#include <osrm/Coordinate.h>
#include <boost/assert.hpp>
struct InternalExtractorEdge
{
InternalExtractorEdge()
: start(0), target(0), type(0), direction(0), speed(0), name_id(0), is_roundabout(false),
is_in_tiny_cc(false), is_duration_set(false), is_access_restricted(false),
is_contra_flow(false), is_split(false)
{
}
explicit InternalExtractorEdge(NodeID start,
NodeID target,
short type,
short direction,
double speed,
unsigned name_id,
bool is_roundabout,
bool is_in_tiny_cc,
bool is_duration_set,
bool is_access_restricted,
bool is_contra_flow,
bool is_split)
: start(start), target(target), type(type), direction(direction), speed(speed),
name_id(name_id), is_roundabout(is_roundabout), is_in_tiny_cc(is_in_tiny_cc),
is_duration_set(is_duration_set), is_access_restricted(is_access_restricted),
is_contra_flow(is_contra_flow), is_split(is_split)
{
BOOST_ASSERT(0 <= type);
}
// necessary static util functions for stxxl's sorting
static InternalExtractorEdge min_value()
{
return InternalExtractorEdge(0, 0, 0, 0, 0, 0, false, false, false, false, false, false);
}
static InternalExtractorEdge max_value()
{
return InternalExtractorEdge(
SPECIAL_NODEID, SPECIAL_NODEID, 0, 0, 0, 0, false, false, false, false, false, false);
}
NodeID start;
NodeID target;
short type;
short direction;
double speed;
unsigned name_id;
bool is_roundabout;
bool is_in_tiny_cc;
bool is_duration_set;
bool is_access_restricted;
bool is_contra_flow;
bool is_split;
FixedPointCoordinate source_coordinate;
FixedPointCoordinate target_coordinate;
};
struct CmpEdgeByStartID
{
typedef InternalExtractorEdge value_type;
bool operator()(const InternalExtractorEdge &a, const InternalExtractorEdge &b) const
{
return a.start < b.start;
}
value_type max_value() { return InternalExtractorEdge::max_value(); }
value_type min_value() { return InternalExtractorEdge::min_value(); }
};
struct CmpEdgeByTargetID
{
typedef InternalExtractorEdge value_type;
bool operator()(const InternalExtractorEdge &a, const InternalExtractorEdge &b) const
{
return a.target < b.target;
}
value_type max_value() { return InternalExtractorEdge::max_value(); }
value_type min_value() { return InternalExtractorEdge::min_value(); }
};
#endif // INTERNAL_EXTRACTOR_EDGE_H

638
Extractor/PBFParser.cpp Normal file

@ -0,0 +1,638 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "PBFParser.h"
#include "ExtractionWay.h"
#include "ExtractorCallbacks.h"
#include "ScriptingEnvironment.h"
#include "../DataStructures/HashTable.h"
#include "../DataStructures/ImportNode.h"
#include "../DataStructures/Restriction.h"
#include "../Util/MachineInfo.h"
#include "../Util/OpenMPWrapper.h"
#include "../Util/OSRMException.h"
#include "../Util/SimpleLogger.h"
#include "../typedefs.h"
#include <osrm/Coordinate.h>
#include <zlib.h>
#include <functional>
#include <limits>
#include <thread>
PBFParser::PBFParser(const char *fileName,
ExtractorCallbacks *extractor_callbacks,
ScriptingEnvironment &scripting_environment)
: BaseParser(extractor_callbacks, scripting_environment)
{
GOOGLE_PROTOBUF_VERIFY_VERSION;
// TODO: What is the bottleneck here? Filling the queue or reading from disk?
// NOTE: With Lua scripting enabled, parsing dominates; I/O is virtually free.
// Max 2500 items in queue, hardcoded.
thread_data_queue = std::make_shared<ConcurrentQueue<ParserThreadData *>>(2500);
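// ReadData() produces raw blocks into this bounded queue while ParseData()
// consumes them; a nullptr pushed by the reader marks the end of the input.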
input.open(fileName, std::ios::in | std::ios::binary);
if (!input)
{
throw OSRMException("pbf file not found.");
}
block_count = 0;
group_count = 0;
}
PBFParser::~PBFParser()
{
if (input.is_open())
{
input.close();
}
// Clean up any leftover ThreadData objects in the queue
ParserThreadData *thread_data;
while (thread_data_queue->try_pop(thread_data))
{
delete thread_data;
}
google::protobuf::ShutdownProtobufLibrary();
SimpleLogger().Write(logDEBUG) << "parsed " << block_count << " blocks from pbf with "
<< group_count << " groups";
}
inline bool PBFParser::ReadHeader()
{
ParserThreadData init_data;
/** read Header */
if (!readPBFBlobHeader(input, &init_data))
{
return false;
}
if (readBlob(input, &init_data))
{
if (!init_data.PBFHeaderBlock.ParseFromArray(&(init_data.charBuffer[0]),
init_data.charBuffer.size()))
{
std::cerr << "[error] Header not parseable!" << std::endl;
return false;
}
const auto feature_size = init_data.PBFHeaderBlock.required_features_size();
for (int i = 0; i < feature_size; ++i)
{
const std::string &feature = init_data.PBFHeaderBlock.required_features(i);
bool supported = false;
if ("OsmSchema-V0.6" == feature)
{
supported = true;
}
else if ("DenseNodes" == feature)
{
supported = true;
}
if (!supported)
{
std::cerr << "[error] required feature not supported: " << feature.data()
<< std::endl;
return false;
}
}
}
else
{
std::cerr << "[error] blob not loaded!" << std::endl;
}
return true;
}
inline void PBFParser::ReadData()
{
bool keep_running = true;
do
{
ParserThreadData *thread_data = new ParserThreadData();
keep_running = readNextBlock(input, thread_data);
if (keep_running)
{
thread_data_queue->push(thread_data);
}
else
{
// No more data to read, parse stops when nullptr encountered
thread_data_queue->push(nullptr);
delete thread_data;
}
} while (keep_running);
}
inline void PBFParser::ParseData()
{
while (true)
{
ParserThreadData *thread_data;
thread_data_queue->wait_and_pop(thread_data);
if (nullptr == thread_data)
{
thread_data_queue->push(nullptr); // Signal end of data for other threads
break;
}
loadBlock(thread_data);
int group_size = thread_data->PBFprimitiveBlock.primitivegroup_size();
for (int i = 0; i < group_size; ++i)
{
thread_data->currentGroupID = i;
loadGroup(thread_data);
if (thread_data->entityTypeIndicator == TypeNode)
{
parseNode(thread_data);
}
if (thread_data->entityTypeIndicator == TypeWay)
{
parseWay(thread_data);
}
if (thread_data->entityTypeIndicator == TypeRelation)
{
parseRelation(thread_data);
}
if (thread_data->entityTypeIndicator == TypeDenseNode)
{
parseDenseNode(thread_data);
}
}
delete thread_data;
thread_data = nullptr;
}
}
inline bool PBFParser::Parse()
{
// Start the read and parse threads
std::thread read_thread(std::bind(&PBFParser::ReadData, this));
// A single parse thread consumes the blocks queued by the read thread
std::thread parse_thread(std::bind(&PBFParser::ParseData, this));
// Wait for the threads to finish
read_thread.join();
parse_thread.join();
return true;
}
inline void PBFParser::parseDenseNode(ParserThreadData *thread_data)
{
const OSMPBF::DenseNodes &dense =
thread_data->PBFprimitiveBlock.primitivegroup(thread_data->currentGroupID).dense();
int denseTagIndex = 0;
int64_t m_lastDenseID = 0;
int64_t m_lastDenseLatitude = 0;
int64_t m_lastDenseLongitude = 0;
const int number_of_nodes = dense.id_size();
std::vector<ImportNode> extracted_nodes_vector(number_of_nodes);
for (int i = 0; i < number_of_nodes; ++i)
{
m_lastDenseID += dense.id(i);
m_lastDenseLatitude += dense.lat(i);
m_lastDenseLongitude += dense.lon(i);
extracted_nodes_vector[i].id = m_lastDenseID;
extracted_nodes_vector[i].lat =
COORDINATE_PRECISION *
((double)m_lastDenseLatitude * thread_data->PBFprimitiveBlock.granularity() +
thread_data->PBFprimitiveBlock.lat_offset()) /
NANO;
extracted_nodes_vector[i].lon =
COORDINATE_PRECISION *
((double)m_lastDenseLongitude * thread_data->PBFprimitiveBlock.granularity() +
thread_data->PBFprimitiveBlock.lon_offset()) /
NANO;
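// PBF dense nodes are delta-coded in nanodegrees; granularity and the lat/lon
// offsets recover degrees (hence the division by NANO), which are then scaled
// to OSRM's fixed-point representation via COORDINATE_PRECISION.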
while (denseTagIndex < dense.keys_vals_size())
{
const int tagValue = dense.keys_vals(denseTagIndex);
if (0 == tagValue)
{
++denseTagIndex;
break;
}
const int keyValue = dense.keys_vals(denseTagIndex + 1);
const std::string &key = thread_data->PBFprimitiveBlock.stringtable().s(tagValue);
const std::string &value = thread_data->PBFprimitiveBlock.stringtable().s(keyValue);
extracted_nodes_vector[i].keyVals.emplace(key, value);
denseTagIndex += 2;
}
}
#pragma omp parallel
{
const int thread_num = omp_get_thread_num();
#pragma omp parallel for schedule(guided)
for (int i = 0; i < number_of_nodes; ++i)
{
ImportNode &import_node = extracted_nodes_vector[i];
ParseNodeInLua(import_node, scripting_environment.getLuaStateForThreadID(thread_num));
}
}
for (const ImportNode &import_node : extracted_nodes_vector)
{
extractor_callbacks->ProcessNode(import_node);
}
}
inline void PBFParser::parseNode(ParserThreadData *)
{
throw OSRMException("Parsing of simple nodes not supported. PBF should use dense nodes");
}
inline void PBFParser::parseRelation(ParserThreadData *thread_data)
{
// TODO: leave early, if relation is not a restriction
// TODO: reuse rawRestriction container
if (!use_turn_restrictions)
{
return;
}
const OSMPBF::PrimitiveGroup &group =
thread_data->PBFprimitiveBlock.primitivegroup(thread_data->currentGroupID);
for (int i = 0, relation_size = group.relations_size(); i < relation_size; ++i)
{
std::string except_tag_string;
const OSMPBF::Relation &inputRelation =
thread_data->PBFprimitiveBlock.primitivegroup(thread_data->currentGroupID).relations(i);
bool isRestriction = false;
bool isOnlyRestriction = false;
for (int k = 0, endOfKeys = inputRelation.keys_size(); k < endOfKeys; ++k)
{
const std::string &key =
thread_data->PBFprimitiveBlock.stringtable().s(inputRelation.keys(k));
const std::string &val =
thread_data->PBFprimitiveBlock.stringtable().s(inputRelation.vals(k));
if ("type" == key)
{
if ("restriction" == val)
{
isRestriction = true;
}
else
{
break;
}
}
if (("restriction" == key) && (val.find("only_") == 0))
{
isOnlyRestriction = true;
}
if ("except" == key)
{
except_tag_string = val;
}
}
if (isRestriction && ShouldIgnoreRestriction(except_tag_string))
{
continue;
}
if (isRestriction)
{
int64_t lastRef = 0;
InputRestrictionContainer currentRestrictionContainer(isOnlyRestriction);
for (int rolesIndex = 0, last_role = inputRelation.roles_sid_size();
rolesIndex < last_role;
++rolesIndex)
{
const std::string &role = thread_data->PBFprimitiveBlock.stringtable().s(
inputRelation.roles_sid(rolesIndex));
lastRef += inputRelation.memids(rolesIndex);
if (!("from" == role || "to" == role || "via" == role))
{
continue;
}
switch (inputRelation.types(rolesIndex))
{
case 0: // node
if ("from" == role || "to" == role)
{ // Only via should be a node
continue;
}
assert("via" == role);
if (std::numeric_limits<unsigned>::max() != currentRestrictionContainer.viaNode)
{
currentRestrictionContainer.viaNode = std::numeric_limits<unsigned>::max();
}
assert(std::numeric_limits<unsigned>::max() == currentRestrictionContainer.viaNode);
currentRestrictionContainer.restriction.viaNode = lastRef;
break;
case 1: // way
assert("from" == role || "to" == role || "via" == role);
if ("from" == role)
{
currentRestrictionContainer.fromWay = lastRef;
}
if ("to" == role)
{
currentRestrictionContainer.toWay = lastRef;
}
if ("via" == role)
{
assert(currentRestrictionContainer.restriction.toNode == std::numeric_limits<unsigned>::max());
currentRestrictionContainer.viaNode = lastRef;
}
break;
case 2: // relation, not used. relations relating to relations are evil.
continue;
default: // should not happen
assert(false);
break;
}
}
if (!extractor_callbacks->ProcessRestriction(currentRestrictionContainer))
{
std::cerr << "[PBFParser] relation not parsed" << std::endl;
}
}
}
}
inline void PBFParser::parseWay(ParserThreadData *thread_data)
{
const int number_of_ways =
thread_data->PBFprimitiveBlock.primitivegroup(thread_data->currentGroupID).ways_size();
std::vector<ExtractionWay> parsed_way_vector(number_of_ways);
for (int i = 0; i < number_of_ways; ++i)
{
const OSMPBF::Way &inputWay =
thread_data->PBFprimitiveBlock.primitivegroup(thread_data->currentGroupID).ways(i);
parsed_way_vector[i].id = inputWay.id();
unsigned pathNode(0);
const int number_of_referenced_nodes = inputWay.refs_size();
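// Node references of a way are delta-encoded as well, hence the running sum.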
for (int j = 0; j < number_of_referenced_nodes; ++j)
{
pathNode += inputWay.refs(j);
parsed_way_vector[i].path.push_back(pathNode);
}
assert(inputWay.keys_size() == inputWay.vals_size());
const int number_of_keys = inputWay.keys_size();
for (int j = 0; j < number_of_keys; ++j)
{
const std::string &key =
thread_data->PBFprimitiveBlock.stringtable().s(inputWay.keys(j));
const std::string &val =
thread_data->PBFprimitiveBlock.stringtable().s(inputWay.vals(j));
parsed_way_vector[i].keyVals.emplace(key, val);
}
}
#pragma omp parallel for schedule(guided)
for (int i = 0; i < number_of_ways; ++i)
{
ExtractionWay &extraction_way = parsed_way_vector[i];
if (2 <= extraction_way.path.size())
{
ParseWayInLua(extraction_way,
scripting_environment.getLuaStateForThreadID(omp_get_thread_num()));
}
}
for (ExtractionWay &extraction_way : parsed_way_vector)
{
if (2 <= extraction_way.path.size())
{
extractor_callbacks->ProcessWay(extraction_way);
}
}
}
inline void PBFParser::loadGroup(ParserThreadData *thread_data)
{
#ifndef NDEBUG
++group_count;
#endif
const OSMPBF::PrimitiveGroup &group =
thread_data->PBFprimitiveBlock.primitivegroup(thread_data->currentGroupID);
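// A PrimitiveGroup only ever contains one entity type; probe each kind and
// remember which one this group holds.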
thread_data->entityTypeIndicator = TypeDummy;
if (0 != group.nodes_size())
{
thread_data->entityTypeIndicator = TypeNode;
}
if (0 != group.ways_size())
{
thread_data->entityTypeIndicator = TypeWay;
}
if (0 != group.relations_size())
{
thread_data->entityTypeIndicator = TypeRelation;
}
if (group.has_dense())
{
thread_data->entityTypeIndicator = TypeDenseNode;
assert(0 != group.dense().id_size());
}
assert(thread_data->entityTypeIndicator != TypeDummy);
}
inline void PBFParser::loadBlock(ParserThreadData *thread_data)
{
++block_count;
thread_data->currentGroupID = 0;
thread_data->currentEntityID = 0;
}
inline bool PBFParser::readPBFBlobHeader(std::fstream &stream, ParserThreadData *thread_data)
{
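// The file format prefixes every blob with a 4-byte length in network byte
// order, followed by a serialized BlobHeader of exactly that many bytes.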
int size(0);
stream.read((char *)&size, sizeof(int));
size = SwapEndian(size);
if (stream.eof())
{
return false;
}
if (size > MAX_BLOB_HEADER_SIZE || size < 0)
{
return false;
}
char *data = new char[size];
stream.read(data, size * sizeof(data[0]));
bool dataSuccessfullyParsed = (thread_data->PBFBlobHeader).ParseFromArray(data, size);
delete[] data;
return dataSuccessfullyParsed;
}
inline bool PBFParser::unpackZLIB(std::fstream &, ParserThreadData *thread_data)
{
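// raw_size is the uncompressed size declared in the Blob; inflate the
// zlib_data payload into a buffer of exactly that many bytes.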
unsigned rawSize = thread_data->PBFBlob.raw_size();
char *unpackedDataArray = new char[rawSize];
z_stream compressedDataStream;
compressedDataStream.next_in = (unsigned char *)thread_data->PBFBlob.zlib_data().data();
compressedDataStream.avail_in = thread_data->PBFBlob.zlib_data().size();
compressedDataStream.next_out = (unsigned char *)unpackedDataArray;
compressedDataStream.avail_out = rawSize;
compressedDataStream.zalloc = Z_NULL;
compressedDataStream.zfree = Z_NULL;
compressedDataStream.opaque = Z_NULL;
int ret = inflateInit(&compressedDataStream);
if (ret != Z_OK)
{
std::cerr << "[error] failed to init zlib stream" << std::endl;
delete[] unpackedDataArray;
return false;
}
ret = inflate(&compressedDataStream, Z_FINISH);
if (ret != Z_STREAM_END)
{
std::cerr << "[error] failed to inflate zlib stream" << std::endl;
std::cerr << "[error] Error type: " << ret << std::endl;
delete[] unpackedDataArray;
return false;
}
ret = inflateEnd(&compressedDataStream);
if (ret != Z_OK)
{
std::cerr << "[error] failed to deinit zlib stream" << std::endl;
delete[] unpackedDataArray;
return false;
}
thread_data->charBuffer.clear();
thread_data->charBuffer.resize(rawSize);
std::copy(unpackedDataArray, unpackedDataArray + rawSize, thread_data->charBuffer.begin());
delete[] unpackedDataArray;
return true;
}
inline bool PBFParser::unpackLZMA(std::fstream &, ParserThreadData *) { return false; }
inline bool PBFParser::readBlob(std::fstream &stream, ParserThreadData *thread_data)
{
if (stream.eof())
{
return false;
}
const int size = thread_data->PBFBlobHeader.datasize();
if (size < 0 || size > MAX_BLOB_SIZE)
{
std::cerr << "[error] invalid Blob size:" << size << std::endl;
return false;
}
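// The Blob payload is stored either raw, zlib-compressed, or (unsupported
// here) lzma-compressed; the branches below copy or inflate it into charBuffer.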
char *data = new char[size];
stream.read(data, sizeof(data[0]) * size);
if (!thread_data->PBFBlob.ParseFromArray(data, size))
{
std::cerr << "[error] failed to parse blob" << std::endl;
delete[] data;
return false;
}
if (thread_data->PBFBlob.has_raw())
{
const std::string &data = thread_data->PBFBlob.raw();
thread_data->charBuffer.clear();
thread_data->charBuffer.resize(data.size());
std::copy(data.begin(), data.end(), thread_data->charBuffer.begin());
}
else if (thread_data->PBFBlob.has_zlib_data())
{
if (!unpackZLIB(stream, thread_data))
{
std::cerr << "[error] zlib data encountered that could not be unpacked" << std::endl;
delete[] data;
return false;
}
}
else if (thread_data->PBFBlob.has_lzma_data())
{
if (!unpackLZMA(stream, thread_data))
{
std::cerr << "[error] lzma data encountered that could not be unpacked" << std::endl;
}
delete[] data;
return false;
}
else
{
std::cerr << "[error] Blob contains no data" << std::endl;
delete[] data;
return false;
}
delete[] data;
return true;
}
bool PBFParser::readNextBlock(std::fstream &stream, ParserThreadData *thread_data)
{
if (stream.eof())
{
return false;
}
if (!readPBFBlobHeader(stream, thread_data))
{
return false;
}
if (thread_data->PBFBlobHeader.type() != "OSMData")
{
return false;
}
if (!readBlob(stream, thread_data))
{
return false;
}
if (!thread_data->PBFprimitiveBlock.ParseFromArray(&(thread_data->charBuffer[0]),
thread_data->charBuffer.size()))
{
std::cerr << "failed to parse PrimitiveBlock" << std::endl;
return false;
}
return true;
}

99
Extractor/PBFParser.h Normal file
View File

@ -0,0 +1,99 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef PBFPARSER_H_
#define PBFPARSER_H_
#include "BaseParser.h"
#include "../DataStructures/ConcurrentQueue.h"
#include <osmpbf/fileformat.pb.h>
#include <osmpbf/osmformat.pb.h>
#include <fstream>
#include <memory>
class PBFParser : public BaseParser
{
enum EntityType
{ TypeDummy = 0,
TypeNode = 1,
TypeWay = 2,
TypeRelation = 4,
TypeDenseNode = 8 };
struct ParserThreadData
{
int currentGroupID;
int currentEntityID;
EntityType entityTypeIndicator;
OSMPBF::BlobHeader PBFBlobHeader;
OSMPBF::Blob PBFBlob;
OSMPBF::HeaderBlock PBFHeaderBlock;
OSMPBF::PrimitiveBlock PBFprimitiveBlock;
std::vector<char> charBuffer;
};
public:
PBFParser(const char *file_name, ExtractorCallbacks *extractor_callbacks, ScriptingEnvironment &scripting_environment);
virtual ~PBFParser();
inline bool ReadHeader();
inline bool Parse();
private:
inline void ReadData();
inline void ParseData();
inline void parseDenseNode(ParserThreadData *thread_data);
inline void parseNode(ParserThreadData *thread_data);
inline void parseRelation(ParserThreadData *thread_data);
inline void parseWay(ParserThreadData *thread_data);
inline void loadGroup(ParserThreadData *thread_data);
inline void loadBlock(ParserThreadData *thread_data);
inline bool readPBFBlobHeader(std::fstream &stream, ParserThreadData *thread_data);
inline bool unpackZLIB(std::fstream &stream, ParserThreadData *thread_data);
inline bool unpackLZMA(std::fstream &stream, ParserThreadData *thread_data);
inline bool readBlob(std::fstream &stream, ParserThreadData *thread_data);
inline bool readNextBlock(std::fstream &stream, ParserThreadData *thread_data);
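// Limits from the PBF format: a BlobHeader must stay below 64 KiB and an
// uncompressed Blob below 32 MiB; coordinates are stored in nanodegrees.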
static const int NANO = 1000 * 1000 * 1000;
static const int MAX_BLOB_HEADER_SIZE = 64 * 1024;
static const int MAX_BLOB_SIZE = 32 * 1024 * 1024;
unsigned group_count;
unsigned block_count;
std::fstream input; // the input stream to parse
std::shared_ptr<ConcurrentQueue<ParserThreadData *>> thread_data_queue;
};
#endif /* PBFPARSER_H_ */

View File

@ -0,0 +1,124 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "ScriptingEnvironment.h"
#include "ExtractionHelperFunctions.h"
#include "ExtractionWay.h"
#include "../DataStructures/ImportNode.h"
#include "../Util/LuaUtil.h"
#include "../Util/OpenMPWrapper.h"
#include "../Util/OSRMException.h"
#include "../Util/SimpleLogger.h"
#include "../typedefs.h"
ScriptingEnvironment::ScriptingEnvironment() {}
ScriptingEnvironment::ScriptingEnvironment(const char *file_name)
{
SimpleLogger().Write() << "Using script " << file_name;
// Create a new lua state
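// One lua_State per OpenMP thread, so profiles can run in parallel without
// sharing (and locking) a single interpreter.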
for (int i = 0; i < omp_get_max_threads(); ++i)
{
lua_state_vector.push_back(luaL_newstate());
}
// Connect LuaBind to this lua state for all threads
#pragma omp parallel
{
lua_State *lua_state = getLuaStateForThreadID(omp_get_thread_num());
luabind::open(lua_state);
// open the standard Lua libraries (string, table, math, ...)
luaL_openlibs(lua_state);
luaAddScriptFolderToLoadPath(lua_state, file_name);
// Add our function to the state's global scope
luabind::module(lua_state)[
luabind::def("print", LUA_print<std::string>),
luabind::def("durationIsValid", durationIsValid),
luabind::def("parseDuration", parseDuration)
];
luabind::module(lua_state)[luabind::class_<HashTable<std::string, std::string>>("keyVals")
.def("Add", &HashTable<std::string, std::string>::Add)
.def("Find", &HashTable<std::string, std::string>::Find)
.def("Holds", &HashTable<std::string, std::string>::Holds)];
luabind::module(lua_state)[luabind::class_<ImportNode>("Node")
.def(luabind::constructor<>())
.def_readwrite("lat", &ImportNode::lat)
.def_readwrite("lon", &ImportNode::lon)
.def_readonly("id", &ImportNode::id)
.def_readwrite("bollard", &ImportNode::bollard)
.def_readwrite("traffic_light", &ImportNode::trafficLight)
.def_readwrite("tags", &ImportNode::keyVals)];
luabind::module(lua_state)
[luabind::class_<ExtractionWay>("Way")
.def(luabind::constructor<>())
.def_readonly("id", &ExtractionWay::id)
.def_readwrite("name", &ExtractionWay::name)
.def_readwrite("speed", &ExtractionWay::speed)
.def_readwrite("backward_speed", &ExtractionWay::backward_speed)
.def_readwrite("duration", &ExtractionWay::duration)
.def_readwrite("type", &ExtractionWay::type)
.def_readwrite("access", &ExtractionWay::access)
.def_readwrite("roundabout", &ExtractionWay::roundabout)
.def_readwrite("is_access_restricted", &ExtractionWay::isAccessRestricted)
.def_readwrite("ignore_in_grid", &ExtractionWay::ignoreInGrid)
.def_readwrite("tags", &ExtractionWay::keyVals)
.def_readwrite("direction", &ExtractionWay::direction)
.enum_("constants")[
luabind::value("notSure", 0),
luabind::value("oneway", 1),
luabind::value("bidirectional", 2),
luabind::value("opposite", 3)
]];
// fails on c++11/OS X 10.9
luabind::module(lua_state)[luabind::class_<std::vector<std::string>>("vector").def(
"Add",
static_cast<void (std::vector<std::string>::*)(const std::string &)>(
&std::vector<std::string>::push_back))];
if (0 != luaL_dofile(lua_state, file_name))
{
throw OSRMException("ERROR occurred in scripting block");
}
}
}
ScriptingEnvironment::~ScriptingEnvironment()
{
// Close each per-thread interpreter; the empty loop here previously leaked
// every lua_State.
for (lua_State *lua_state : lua_state_vector)
{
lua_close(lua_state);
}
}
lua_State *ScriptingEnvironment::getLuaStateForThreadID(const int id) { return lua_state_vector[id]; }

View File

@ -0,0 +1,47 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SCRIPTINGENVIRONMENT_H_
#define SCRIPTINGENVIRONMENT_H_
#include <vector>
struct lua_State;
class ScriptingEnvironment
{
public:
ScriptingEnvironment();
explicit ScriptingEnvironment(const char *file_name);
virtual ~ScriptingEnvironment();
lua_State *getLuaStateForThreadID(const int);
std::vector<lua_State *> lua_state_vector;
};
#endif /* SCRIPTINGENVIRONMENT_H_ */

347
Extractor/XMLParser.cpp Normal file
View File

@ -0,0 +1,347 @@
/*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "XMLParser.h"
#include "ExtractionWay.h"
#include "ExtractorCallbacks.h"
#include "../DataStructures/HashTable.h"
#include "../DataStructures/ImportNode.h"
#include "../DataStructures/InputReaderFactory.h"
#include "../DataStructures/Restriction.h"
#include "../Util/SimpleLogger.h"
#include "../Util/StringUtil.h"
#include "../typedefs.h"
#include <osrm/Coordinate.h>
XMLParser::XMLParser(const char *filename, ExtractorCallbacks *ec, ScriptingEnvironment &se)
: BaseParser(ec, se)
{
inputReader = inputReaderFactory(filename);
}
bool XMLParser::ReadHeader() { return (xmlTextReaderRead(inputReader) == 1); }
bool XMLParser::Parse()
{
while (xmlTextReaderRead(inputReader) == 1)
{
const int type = xmlTextReaderNodeType(inputReader);
// 1 is Element
if (type != 1)
{
continue;
}
xmlChar *currentName = xmlTextReaderName(inputReader);
if (currentName == NULL)
{
continue;
}
if (xmlStrEqual(currentName, (const xmlChar *)"node") == 1)
{
ImportNode n = ReadXMLNode();
ParseNodeInLua(n, lua_state);
extractor_callbacks->ProcessNode(n);
}
if (xmlStrEqual(currentName, (const xmlChar *)"way") == 1)
{
ExtractionWay way = ReadXMLWay();
ParseWayInLua(way, lua_state);
extractor_callbacks->ProcessWay(way);
}
if (use_turn_restrictions && xmlStrEqual(currentName, (const xmlChar *)"relation") == 1)
{
InputRestrictionContainer r = ReadXMLRestriction();
if ((UINT_MAX != r.fromWay) && !extractor_callbacks->ProcessRestriction(r))
{
std::cerr << "[XMLParser] restriction not parsed" << std::endl;
}
}
xmlFree(currentName);
}
return true;
}
InputRestrictionContainer XMLParser::ReadXMLRestriction()
{
InputRestrictionContainer restriction;
std::string except_tag_string;
if (xmlTextReaderIsEmptyElement(inputReader) != 1)
{
const int depth = xmlTextReaderDepth(inputReader);
while (xmlTextReaderRead(inputReader) == 1)
{
const int child_type = xmlTextReaderNodeType(inputReader);
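// libxml2 reader node types: 1 = element start, 15 = element end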
if (child_type != 1 && child_type != 15)
{
continue;
}
const int childDepth = xmlTextReaderDepth(inputReader);
xmlChar *childName = xmlTextReaderName(inputReader);
if (childName == NULL)
{
continue;
}
if (depth == childDepth && child_type == 15 &&
xmlStrEqual(childName, (const xmlChar *)"relation") == 1)
{
xmlFree(childName);
break;
}
if (child_type != 1)
{
xmlFree(childName);
continue;
}
if (xmlStrEqual(childName, (const xmlChar *)"tag") == 1)
{
xmlChar *k = xmlTextReaderGetAttribute(inputReader, (const xmlChar *)"k");
xmlChar *value = xmlTextReaderGetAttribute(inputReader, (const xmlChar *)"v");
if (k != NULL && value != NULL)
{
if (xmlStrEqual(k, (const xmlChar *)"restriction") &&
(0 == std::string((const char *)value).find("only_")))
{
restriction.restriction.flags.isOnly = true;
}
if (xmlStrEqual(k, (const xmlChar *)"except"))
{
except_tag_string = (const char *)value;
}
}
if (k != NULL)
{
xmlFree(k);
}
if (value != NULL)
{
xmlFree(value);
}
}
else if (xmlStrEqual(childName, (const xmlChar *)"member") == 1)
{
xmlChar *ref = xmlTextReaderGetAttribute(inputReader, (const xmlChar *)"ref");
if (ref != NULL)
{
xmlChar *role = xmlTextReaderGetAttribute(inputReader, (const xmlChar *)"role");
xmlChar *type = xmlTextReaderGetAttribute(inputReader, (const xmlChar *)"type");
if (xmlStrEqual(role, (const xmlChar *)"to") &&
xmlStrEqual(type, (const xmlChar *)"way"))
{
restriction.toWay = stringToUint((const char *)ref);
}
if (xmlStrEqual(role, (const xmlChar *)"from") &&
xmlStrEqual(type, (const xmlChar *)"way"))
{
restriction.fromWay = stringToUint((const char *)ref);
}
if (xmlStrEqual(role, (const xmlChar *)"via") &&
xmlStrEqual(type, (const xmlChar *)"node"))
{
restriction.restriction.viaNode = stringToUint((const char *)ref);
}
if (NULL != type)
{
xmlFree(type);
}
if (NULL != role)
{
xmlFree(role);
}
if (NULL != ref)
{
xmlFree(ref);
}
}
}
xmlFree(childName);
}
}
if (ShouldIgnoreRestriction(except_tag_string))
{
restriction.fromWay = UINT_MAX; // workaround to ignore the restriction
}
return restriction;
}
ExtractionWay XMLParser::ReadXMLWay()
{
ExtractionWay way;
if (xmlTextReaderIsEmptyElement(inputReader) == 1)
{
return way;
}
const int depth = xmlTextReaderDepth(inputReader);
while (xmlTextReaderRead(inputReader) == 1)
{
const int child_type = xmlTextReaderNodeType(inputReader);
if (child_type != 1 && child_type != 15)
{
continue;
}
const int childDepth = xmlTextReaderDepth(inputReader);
xmlChar *childName = xmlTextReaderName(inputReader);
if (childName == NULL)
{
continue;
}
if (depth == childDepth && child_type == 15 &&
xmlStrEqual(childName, (const xmlChar *)"way") == 1)
{
xmlChar *id = xmlTextReaderGetAttribute(inputReader, (const xmlChar *)"id");
if (id != NULL)
{
way.id = stringToUint((char *)id);
xmlFree(id);
}
xmlFree(childName);
break;
}
if (child_type != 1)
{
xmlFree(childName);
continue;
}
if (xmlStrEqual(childName, (const xmlChar *)"tag") == 1)
{
xmlChar *k = xmlTextReaderGetAttribute(inputReader, (const xmlChar *)"k");
xmlChar *value = xmlTextReaderGetAttribute(inputReader, (const xmlChar *)"v");
if (k != NULL && value != NULL)
{
way.keyVals.Add(std::string((char *)k), std::string((char *)value));
}
if (k != NULL)
{
xmlFree(k);
}
if (value != NULL)
{
xmlFree(value);
}
}
else if (xmlStrEqual(childName, (const xmlChar *)"nd") == 1)
{
xmlChar *ref = xmlTextReaderGetAttribute(inputReader, (const xmlChar *)"ref");
if (ref != NULL)
{
way.path.push_back(stringToUint((const char *)ref));
xmlFree(ref);
}
}
xmlFree(childName);
}
return way;
}
ImportNode XMLParser::ReadXMLNode()
{
ImportNode node;
xmlChar *attribute = xmlTextReaderGetAttribute(inputReader, (const xmlChar *)"lat");
if (attribute != NULL)
{
node.lat = COORDINATE_PRECISION * StringToDouble((const char *)attribute);
xmlFree(attribute);
}
attribute = xmlTextReaderGetAttribute(inputReader, (const xmlChar *)"lon");
if (attribute != NULL)
{
node.lon = COORDINATE_PRECISION * StringToDouble((const char *)attribute);
xmlFree(attribute);
}
attribute = xmlTextReaderGetAttribute(inputReader, (const xmlChar *)"id");
if (attribute != NULL)
{
node.id = stringToUint((const char *)attribute);
xmlFree(attribute);
}
if (xmlTextReaderIsEmptyElement(inputReader) == 1)
{
return node;
}
const int depth = xmlTextReaderDepth(inputReader);
while (xmlTextReaderRead(inputReader) == 1)
{
const int child_type = xmlTextReaderNodeType(inputReader);
// 1 = Element, 15 = EndElement
if (child_type != 1 && child_type != 15)
{
continue;
}
const int childDepth = xmlTextReaderDepth(inputReader);
xmlChar *childName = xmlTextReaderName(inputReader);
if (childName == NULL)
{
continue;
}
if (depth == childDepth && child_type == 15 &&
xmlStrEqual(childName, (const xmlChar *)"node") == 1)
{
xmlFree(childName);
break;
}
if (child_type != 1)
{
xmlFree(childName);
continue;
}
if (xmlStrEqual(childName, (const xmlChar *)"tag") == 1)
{
xmlChar *k = xmlTextReaderGetAttribute(inputReader, (const xmlChar *)"k");
xmlChar *value = xmlTextReaderGetAttribute(inputReader, (const xmlChar *)"v");
if (k != NULL && value != NULL)
{
node.keyVals.emplace(std::string((char *)(k)), std::string((char *)(value)));
}
if (k != NULL)
{
xmlFree(k);
}
if (value != NULL)
{
xmlFree(value);
}
}
xmlFree(childName);
}
return node;
}

View File

@ -1,6 +1,6 @@
 /*
-Copyright (c) 2017, Project OSRM contributors
+Copyright (c) 2013, Project OSRM, Dennis Luxen, others
 All rights reserved.
 Redistribution and use in source and binary forms, with or without modification,
@ -25,28 +25,26 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
-#ifndef CONTRACTOR_CONTRACTOR_HPP
-#define CONTRACTOR_CONTRACTOR_HPP
+#ifndef XMLPARSER_H_
+#define XMLPARSER_H_
-#include "contractor/contractor_config.hpp"
+#include "ExtractorCallbacks.h"
+#include "BaseParser.h"
-namespace osrm::contractor
-{
+#include <libxml/xmlreader.h>
-/// Base class of osrm-contract
-class Contractor
+class XMLParser : public BaseParser
 {
 public:
-explicit Contractor(const ContractorConfig &config_) : config{config_} {}
-Contractor(const Contractor &) = delete;
-Contractor &operator=(const Contractor &) = delete;
-int Run();
+XMLParser(const char *filename, ExtractorCallbacks *ec, ScriptingEnvironment &se);
+bool ReadHeader();
+bool Parse();
 private:
-ContractorConfig config;
+InputRestrictionContainer ReadXMLRestriction();
+ExtractionWay ReadXMLWay();
+ImportNode ReadXMLNode();
+xmlTextReaderPtr inputReader;
 };
-} // namespace osrm::contractor
-#endif // PROCESSING_CHAIN_HPP
+#endif /* XMLPARSER_H_ */

7
Gemfile Normal file
View File

@ -0,0 +1,7 @@
source "http://rubygems.org"
gem "cucumber"
gem "rake"
gem "osmlib-base"
gem "sys-proctable"
gem "rspec-expectations"

Some files were not shown because too many files have changed in this diff.