Merge branch 'develop'

This commit is contained in:
Patrick Niklaus 2015-09-16 18:18:54 +02:00
commit 6ad1cd3fb5
299 changed files with 17000 additions and 5682 deletions

View File

@ -7,7 +7,7 @@ install:
- sudo apt-add-repository -y ppa:ubuntu-toolchain-r/test - sudo apt-add-repository -y ppa:ubuntu-toolchain-r/test
- sudo add-apt-repository -y ppa:boost-latest/ppa - sudo add-apt-repository -y ppa:boost-latest/ppa
- sudo apt-get update >/dev/null - sudo apt-get update >/dev/null
- sudo apt-get -q install protobuf-compiler libprotoc-dev libprotobuf7 libprotobuf-dev libbz2-dev libstxxl-dev libstxxl1 libxml2-dev libzip-dev lua5.1 liblua5.1-0-dev rubygems libtbb-dev - sudo apt-get -q install libbz2-dev libstxxl-dev libstxxl1 libxml2-dev libzip-dev lua5.1 liblua5.1-0-dev rubygems libtbb-dev
- sudo apt-get -q install g++-4.8 - sudo apt-get -q install g++-4.8
- sudo apt-get install libboost1.54-all-dev - sudo apt-get install libboost1.54-all-dev
- sudo apt-get install libgdal-dev - sudo apt-get install libgdal-dev
@ -17,8 +17,6 @@ install:
- curl -s https://gist.githubusercontent.com/DennisOSRM/803a64a9178ec375069f/raw/ | sudo bash - curl -s https://gist.githubusercontent.com/DennisOSRM/803a64a9178ec375069f/raw/ | sudo bash
# cmake # cmake
- curl -s https://gist.githubusercontent.com/DennisOSRM/5fad9bee5c7f09fd7fc9/raw/ | sudo bash - curl -s https://gist.githubusercontent.com/DennisOSRM/5fad9bee5c7f09fd7fc9/raw/ | sudo bash
# osmpbf library
- curl -s https://gist.githubusercontent.com/DennisOSRM/13b1b4fe38a57ead850e/raw/install_osmpbf.sh | sudo bash
before_script: before_script:
- rvm use 1.9.3 - rvm use 1.9.3
- gem install bundler - gem install bundler

View File

@ -51,10 +51,11 @@ configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/util/git_sha.cpp ${CMAKE_CURRENT_SOURCE_DIR}/util/git_sha.cpp
) )
file(GLOB ExtractorGlob extractor/*.cpp) file(GLOB ExtractorGlob extractor/*.cpp)
file(GLOB ImporterGlob data_structures/import_edge.cpp data_structures/external_memory_node.cpp) file(GLOB ImporterGlob data_structures/import_edge.cpp data_structures/external_memory_node.cpp data_structures/raster_source.cpp)
add_library(IMPORT OBJECT ${ImporterGlob}) add_library(IMPORT OBJECT ${ImporterGlob})
add_library(LOGGER OBJECT util/simple_logger.cpp) add_library(LOGGER OBJECT util/simple_logger.cpp)
add_library(PHANTOMNODE OBJECT data_structures/phantom_node.cpp) add_library(PHANTOMNODE OBJECT data_structures/phantom_node.cpp)
add_library(RASTERSOURCE OBJECT data_structures/raster_source.cpp)
add_library(EXCEPTION OBJECT util/osrm_exception.cpp) add_library(EXCEPTION OBJECT util/osrm_exception.cpp)
add_library(MERCATOR OBJECT util/mercator.cpp) add_library(MERCATOR OBJECT util/mercator.cpp)
add_library(ANGLE OBJECT util/compute_angle.cpp) add_library(ANGLE OBJECT util/compute_angle.cpp)
@ -63,21 +64,22 @@ set(ExtractorSources extract.cpp ${ExtractorGlob})
add_executable(osrm-extract ${ExtractorSources} $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:GITDESCRIPTION> $<TARGET_OBJECTS:IMPORT> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR>) add_executable(osrm-extract ${ExtractorSources} $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:GITDESCRIPTION> $<TARGET_OBJECTS:IMPORT> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR>)
add_library(RESTRICTION OBJECT data_structures/restriction_map.cpp) add_library(RESTRICTION OBJECT data_structures/restriction_map.cpp)
add_library(COMPRESSEDEDGE OBJECT data_structures/compressed_edge_container.cpp)
add_library(GRAPHCOMPRESSOR OBJECT algorithms/graph_compressor.cpp)
file(GLOB PrepareGlob contractor/*.cpp data_structures/hilbert_value.cpp {RestrictionMapGlob}) file(GLOB PrepareGlob contractor/*.cpp data_structures/hilbert_value.cpp {RestrictionMapGlob})
set(PrepareSources prepare.cpp ${PrepareGlob}) set(PrepareSources prepare.cpp ${PrepareGlob})
add_executable(osrm-prepare ${PrepareSources} $<TARGET_OBJECTS:ANGLE> $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:GITDESCRIPTION> $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:IMPORT> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:RESTRICTION> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR>) add_executable(osrm-prepare ${PrepareSources} $<TARGET_OBJECTS:ANGLE> $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:GITDESCRIPTION> $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:IMPORT> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:RESTRICTION> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR> $<TARGET_OBJECTS:COMPRESSEDEDGE> $<TARGET_OBJECTS:GRAPHCOMPRESSOR>)
file(GLOB ServerGlob server/*.cpp) file(GLOB ServerGlob server/*.cpp)
file(GLOB DescriptorGlob descriptors/*.cpp) file(GLOB DescriptorGlob descriptors/*.cpp)
file(GLOB DatastructureGlob data_structures/search_engine_data.cpp data_structures/route_parameters.cpp util/bearing.cpp) file(GLOB DatastructureGlob data_structures/search_engine_data.cpp data_structures/route_parameters.cpp util/bearing.cpp)
list(REMOVE_ITEM DatastructureGlob data_structures/Coordinate.cpp) file(GLOB CoordinateGlob data_structures/coordinate.cpp algorithms/coordinate_calculation.cpp)
file(GLOB CoordinateGlob data_structures/coordinate*.cpp) file(GLOB AlgorithmGlob algorithms/polyline_compressor.cpp algorithms/polyline_formatter.cpp algorithms/douglas_peucker.cpp)
file(GLOB AlgorithmGlob algorithms/*.cpp)
file(GLOB HttpGlob server/http/*.cpp) file(GLOB HttpGlob server/http/*.cpp)
file(GLOB LibOSRMGlob library/*.cpp) file(GLOB LibOSRMGlob library/*.cpp)
file(GLOB DataStructureTestsGlob unit_tests/data_structures/*.cpp data_structures/hilbert_value.cpp) file(GLOB DataStructureTestsGlob unit_tests/data_structures/*.cpp data_structures/hilbert_value.cpp)
file(GLOB AlgorithmTestsGlob unit_tests/algorithms/*.cpp) file(GLOB AlgorithmTestsGlob unit_tests/algorithms/*.cpp algorithms/graph_compressor.cpp)
set( set(
OSRMSources OSRMSources
@ -90,7 +92,7 @@ set(
add_library(COORDINATE OBJECT ${CoordinateGlob}) add_library(COORDINATE OBJECT ${CoordinateGlob})
add_library(GITDESCRIPTION OBJECT util/git_sha.cpp) add_library(GITDESCRIPTION OBJECT util/git_sha.cpp)
add_library(OSRM ${OSRMSources} $<TARGET_OBJECTS:ANGLE> $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:GITDESCRIPTION> $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:PHANTOMNODE> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR> $<TARGET_OBJECTS:IMPORT>) add_library(OSRM ${OSRMSources} $<TARGET_OBJECTS:ANGLE> $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:GITDESCRIPTION> $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:RESTRICTION> $<TARGET_OBJECTS:PHANTOMNODE> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR> $<TARGET_OBJECTS:IMPORT>)
add_library(FINGERPRINT OBJECT util/fingerprint.cpp) add_library(FINGERPRINT OBJECT util/fingerprint.cpp)
add_dependencies(FINGERPRINT FingerPrintConfigure) add_dependencies(FINGERPRINT FingerPrintConfigure)
@ -101,8 +103,8 @@ add_executable(osrm-routed routed.cpp ${ServerGlob} $<TARGET_OBJECTS:EXCEPTION>)
add_executable(osrm-datastore datastore.cpp $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:GITDESCRIPTION> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR>) add_executable(osrm-datastore datastore.cpp $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:FINGERPRINT> $<TARGET_OBJECTS:GITDESCRIPTION> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR>)
# Unit tests # Unit tests
add_executable(datastructure-tests EXCLUDE_FROM_ALL unit_tests/datastructure_tests.cpp ${DataStructureTestsGlob} $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:PHANTOMNODE> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR>) add_executable(datastructure-tests EXCLUDE_FROM_ALL unit_tests/datastructure_tests.cpp ${DataStructureTestsGlob} $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:PHANTOMNODE> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR> $<TARGET_OBJECTS:COMPRESSEDEDGE> $<TARGET_OBJECTS:GRAPHCOMPRESSOR> $<TARGET_OBJECTS:RESTRICTION> $<TARGET_OBJECTS:RASTERSOURCE>)
add_executable(algorithm-tests EXCLUDE_FROM_ALL unit_tests/algorithm_tests.cpp ${AlgorithmTestsGlob} $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:PHANTOMNODE> $<TARGET_OBJECTS:EXCEPTION>) add_executable(algorithm-tests EXCLUDE_FROM_ALL unit_tests/algorithm_tests.cpp ${AlgorithmTestsGlob} $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:PHANTOMNODE> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:RESTRICTION> $<TARGET_OBJECTS:COMPRESSEDEDGE>)
# Benchmarks # Benchmarks
add_executable(rtree-bench EXCLUDE_FROM_ALL benchmarks/static_rtree.cpp $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:PHANTOMNODE> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR>) add_executable(rtree-bench EXCLUDE_FROM_ALL benchmarks/static_rtree.cpp $<TARGET_OBJECTS:COORDINATE> $<TARGET_OBJECTS:LOGGER> $<TARGET_OBJECTS:PHANTOMNODE> $<TARGET_OBJECTS:EXCEPTION> $<TARGET_OBJECTS:MERCATOR>)
@ -280,16 +282,6 @@ if(OPENMP_FOUND)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
endif() endif()
find_package(OSMPBF REQUIRED)
include_directories(${OSMPBF_INCLUDE_DIR})
target_link_libraries(osrm-extract ${OSMPBF_LIBRARY})
target_link_libraries(osrm-prepare ${OSMPBF_LIBRARY})
find_package(Protobuf REQUIRED)
include_directories(${PROTOBUF_INCLUDE_DIRS})
target_link_libraries(osrm-extract ${PROTOBUF_LIBRARY})
target_link_libraries(osrm-prepare ${PROTOBUF_LIBRARY})
find_package(BZip2 REQUIRED) find_package(BZip2 REQUIRED)
include_directories(${BZIP_INCLUDE_DIRS}) include_directories(${BZIP_INCLUDE_DIRS})
target_link_libraries(osrm-extract ${BZIP2_LIBRARIES}) target_link_libraries(osrm-extract ${BZIP2_LIBRARIES})

View File

@ -31,6 +31,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <cmath> #include <cmath>
#include <vector> #include <vector>
#include <utility>
struct NormalDistribution struct NormalDistribution
{ {
@ -80,11 +81,11 @@ class BayesClassifier
}; };
using ClassificationT = std::pair<ClassLabel, double>; using ClassificationT = std::pair<ClassLabel, double>;
BayesClassifier(const PositiveDistributionT &positive_distribution, BayesClassifier(PositiveDistributionT positive_distribution,
const NegativeDistributionT &negative_distribution, NegativeDistributionT negative_distribution,
const double positive_apriori_probability) const double positive_apriori_probability)
: positive_distribution(positive_distribution), : positive_distribution(std::move(positive_distribution)),
negative_distribution(negative_distribution), negative_distribution(std::move(negative_distribution)),
positive_apriori_probability(positive_apriori_probability), positive_apriori_probability(positive_apriori_probability),
negative_apriori_probability(1. - positive_apriori_probability) negative_apriori_probability(1. - positive_apriori_probability)
{ {

View File

@ -46,7 +46,10 @@ constexpr static const float RAD = 0.017453292519943295769236907684886f;
constexpr static const float earth_radius = 6372797.560856f; constexpr static const float earth_radius = 6372797.560856f;
} }
double coordinate_calculation::great_circle_distance(const int lat1, namespace coordinate_calculation
{
double great_circle_distance(const int lat1,
const int lon1, const int lon1,
const int lat2, const int lat2,
const int lon2) const int lon2)
@ -74,21 +77,21 @@ double coordinate_calculation::great_circle_distance(const int lat1,
return earth_radius * cHarv; return earth_radius * cHarv;
} }
double coordinate_calculation::great_circle_distance(const FixedPointCoordinate &coordinate_1, double great_circle_distance(const FixedPointCoordinate &coordinate_1,
const FixedPointCoordinate &coordinate_2) const FixedPointCoordinate &coordinate_2)
{ {
return great_circle_distance(coordinate_1.lat, coordinate_1.lon, coordinate_2.lat, return great_circle_distance(coordinate_1.lat, coordinate_1.lon, coordinate_2.lat,
coordinate_2.lon); coordinate_2.lon);
} }
float coordinate_calculation::euclidean_distance(const FixedPointCoordinate &coordinate_1, float euclidean_distance(const FixedPointCoordinate &coordinate_1,
const FixedPointCoordinate &coordinate_2) const FixedPointCoordinate &coordinate_2)
{ {
return euclidean_distance(coordinate_1.lat, coordinate_1.lon, coordinate_2.lat, return euclidean_distance(coordinate_1.lat, coordinate_1.lon, coordinate_2.lat,
coordinate_2.lon); coordinate_2.lon);
} }
float coordinate_calculation::euclidean_distance(const int lat1, float euclidean_distance(const int lat1,
const int lon1, const int lon1,
const int lat2, const int lat2,
const int lon2) const int lon2)
@ -108,7 +111,7 @@ float coordinate_calculation::euclidean_distance(const int lat1,
return std::hypot(x_value, y_value) * earth_radius; return std::hypot(x_value, y_value) * earth_radius;
} }
float coordinate_calculation::perpendicular_distance(const FixedPointCoordinate &source_coordinate, float perpendicular_distance(const FixedPointCoordinate &source_coordinate,
const FixedPointCoordinate &target_coordinate, const FixedPointCoordinate &target_coordinate,
const FixedPointCoordinate &query_location) const FixedPointCoordinate &query_location)
{ {
@ -119,7 +122,7 @@ float coordinate_calculation::perpendicular_distance(const FixedPointCoordinate
nearest_location, ratio); nearest_location, ratio);
} }
float coordinate_calculation::perpendicular_distance(const FixedPointCoordinate &segment_source, float perpendicular_distance(const FixedPointCoordinate &segment_source,
const FixedPointCoordinate &segment_target, const FixedPointCoordinate &segment_target,
const FixedPointCoordinate &query_location, const FixedPointCoordinate &query_location,
FixedPointCoordinate &nearest_location, FixedPointCoordinate &nearest_location,
@ -132,7 +135,7 @@ float coordinate_calculation::perpendicular_distance(const FixedPointCoordinate
nearest_location, ratio); nearest_location, ratio);
} }
float coordinate_calculation::perpendicular_distance_from_projected_coordinate( float perpendicular_distance_from_projected_coordinate(
const FixedPointCoordinate &source_coordinate, const FixedPointCoordinate &source_coordinate,
const FixedPointCoordinate &target_coordinate, const FixedPointCoordinate &target_coordinate,
const FixedPointCoordinate &query_location, const FixedPointCoordinate &query_location,
@ -146,7 +149,7 @@ float coordinate_calculation::perpendicular_distance_from_projected_coordinate(
nearest_location, ratio); nearest_location, ratio);
} }
float coordinate_calculation::perpendicular_distance_from_projected_coordinate( float perpendicular_distance_from_projected_coordinate(
const FixedPointCoordinate &segment_source, const FixedPointCoordinate &segment_source,
const FixedPointCoordinate &segment_target, const FixedPointCoordinate &segment_target,
const FixedPointCoordinate &query_location, const FixedPointCoordinate &query_location,
@ -221,29 +224,29 @@ float coordinate_calculation::perpendicular_distance_from_projected_coordinate(
BOOST_ASSERT(nearest_location.is_valid()); BOOST_ASSERT(nearest_location.is_valid());
const float approximate_distance = const float approximate_distance =
coordinate_calculation::euclidean_distance(query_location, nearest_location); euclidean_distance(query_location, nearest_location);
BOOST_ASSERT(0.f <= approximate_distance); BOOST_ASSERT(0.f <= approximate_distance);
return approximate_distance; return approximate_distance;
} }
void coordinate_calculation::lat_or_lon_to_string(const int value, std::string &output) void lat_or_lon_to_string(const int value, std::string &output)
{ {
char buffer[12]; char buffer[12];
buffer[11] = 0; // zero termination buffer[11] = 0; // zero termination
output = printInt<11, 6>(buffer, value); output = printInt<11, 6>(buffer, value);
} }
float coordinate_calculation::deg_to_rad(const float degree) float deg_to_rad(const float degree)
{ {
return degree * (static_cast<float>(M_PI) / 180.f); return degree * (static_cast<float>(M_PI) / 180.f);
} }
float coordinate_calculation::rad_to_deg(const float radian) float rad_to_deg(const float radian)
{ {
return radian * (180.f * static_cast<float>(M_1_PI)); return radian * (180.f * static_cast<float>(M_1_PI));
} }
float coordinate_calculation::bearing(const FixedPointCoordinate &first_coordinate, float bearing(const FixedPointCoordinate &first_coordinate,
const FixedPointCoordinate &second_coordinate) const FixedPointCoordinate &second_coordinate)
{ {
const float lon_diff = const float lon_diff =
@ -266,3 +269,5 @@ float coordinate_calculation::bearing(const FixedPointCoordinate &first_coordina
} }
return result; return result;
} }
}

View File

@ -33,38 +33,38 @@ struct FixedPointCoordinate;
#include <string> #include <string>
#include <utility> #include <utility>
struct coordinate_calculation namespace coordinate_calculation
{ {
static double double
great_circle_distance(const int lat1, const int lon1, const int lat2, const int lon2); great_circle_distance(const int lat1, const int lon1, const int lat2, const int lon2);
static double great_circle_distance(const FixedPointCoordinate &first_coordinate, double great_circle_distance(const FixedPointCoordinate &first_coordinate,
const FixedPointCoordinate &second_coordinate); const FixedPointCoordinate &second_coordinate);
static float euclidean_distance(const FixedPointCoordinate &first_coordinate, float euclidean_distance(const FixedPointCoordinate &first_coordinate,
const FixedPointCoordinate &second_coordinate); const FixedPointCoordinate &second_coordinate);
static float euclidean_distance(const int lat1, const int lon1, const int lat2, const int lon2); float euclidean_distance(const int lat1, const int lon1, const int lat2, const int lon2);
static void lat_or_lon_to_string(const int value, std::string &output); void lat_or_lon_to_string(const int value, std::string &output);
static float perpendicular_distance(const FixedPointCoordinate &segment_source, float perpendicular_distance(const FixedPointCoordinate &segment_source,
const FixedPointCoordinate &segment_target, const FixedPointCoordinate &segment_target,
const FixedPointCoordinate &query_location); const FixedPointCoordinate &query_location);
static float perpendicular_distance(const FixedPointCoordinate &segment_source, float perpendicular_distance(const FixedPointCoordinate &segment_source,
const FixedPointCoordinate &segment_target, const FixedPointCoordinate &segment_target,
const FixedPointCoordinate &query_location, const FixedPointCoordinate &query_location,
FixedPointCoordinate &nearest_location, FixedPointCoordinate &nearest_location,
float &ratio); float &ratio);
static float perpendicular_distance_from_projected_coordinate( float perpendicular_distance_from_projected_coordinate(
const FixedPointCoordinate &segment_source, const FixedPointCoordinate &segment_source,
const FixedPointCoordinate &segment_target, const FixedPointCoordinate &segment_target,
const FixedPointCoordinate &query_location, const FixedPointCoordinate &query_location,
const std::pair<double, double> &projected_coordinate); const std::pair<double, double> &projected_coordinate);
static float perpendicular_distance_from_projected_coordinate( float perpendicular_distance_from_projected_coordinate(
const FixedPointCoordinate &segment_source, const FixedPointCoordinate &segment_source,
const FixedPointCoordinate &segment_target, const FixedPointCoordinate &segment_target,
const FixedPointCoordinate &query_location, const FixedPointCoordinate &query_location,
@ -72,11 +72,11 @@ struct coordinate_calculation
FixedPointCoordinate &nearest_location, FixedPointCoordinate &nearest_location,
float &ratio); float &ratio);
static float deg_to_rad(const float degree); float deg_to_rad(const float degree);
static float rad_to_deg(const float radian); float rad_to_deg(const float radian);
static float bearing(const FixedPointCoordinate &first_coordinate, float bearing(const FixedPointCoordinate &first_coordinate,
const FixedPointCoordinate &second_coordinate); const FixedPointCoordinate &second_coordinate);
}; }
#endif // COORDINATE_CALCULATION #endif // COORDINATE_CALCULATION

View File

@ -99,8 +99,8 @@ void DouglasPeucker::Run(RandomAccessIt begin, RandomAccessIt end, const unsigne
{ {
BOOST_ASSERT_MSG(zoom_level < DOUGLAS_PEUCKER_THRESHOLDS.size(), "unsupported zoom level"); BOOST_ASSERT_MSG(zoom_level < DOUGLAS_PEUCKER_THRESHOLDS.size(), "unsupported zoom level");
RandomAccessIt left_border = begin; auto left_border = begin;
RandomAccessIt right_border = std::next(begin); auto right_border = std::next(begin);
// Sweep over array and identify those ranges that need to be checked // Sweep over array and identify those ranges that need to be checked
do do
{ {

View File

@ -0,0 +1,188 @@
#include "graph_compressor.hpp"
#include "../data_structures/compressed_edge_container.hpp"
#include "../data_structures/dynamic_graph.hpp"
#include "../data_structures/node_based_graph.hpp"
#include "../data_structures/restriction_map.hpp"
#include "../data_structures/percent.hpp"
#include "../util/simple_logger.hpp"
GraphCompressor::GraphCompressor(SpeedProfileProperties speed_profile)
: speed_profile(std::move(speed_profile))
{
}
void GraphCompressor::Compress(const std::unordered_set<NodeID>& barrier_nodes,
const std::unordered_set<NodeID>& traffic_lights,
RestrictionMap& restriction_map,
NodeBasedDynamicGraph& graph,
CompressedEdgeContainer& geometry_compressor)
{
const unsigned original_number_of_nodes = graph.GetNumberOfNodes();
const unsigned original_number_of_edges = graph.GetNumberOfEdges();
Percent progress(original_number_of_nodes);
for (const NodeID node_v : osrm::irange(0u, original_number_of_nodes))
{
progress.printStatus(node_v);
// only contract degree 2 vertices
if (2 != graph.GetOutDegree(node_v))
{
continue;
}
// don't contract barrier node
if (barrier_nodes.end() != barrier_nodes.find(node_v))
{
continue;
}
// check if v is a via node for a turn restriction, i.e. a 'directed' barrier node
if (restriction_map.IsViaNode(node_v))
{
continue;
}
// reverse_e2 forward_e2
// u <---------- v -----------> w
// ----------> <-----------
// forward_e1 reverse_e1
//
// Will be compressed to:
//
// reverse_e1
// u <---------- w
// ---------->
// forward_e1
//
// If the edges are compatible.
const bool reverse_edge_order = graph.GetEdgeData(graph.BeginEdges(node_v)).reversed;
const EdgeID forward_e2 = graph.BeginEdges(node_v) + reverse_edge_order;
BOOST_ASSERT(SPECIAL_EDGEID != forward_e2);
BOOST_ASSERT(forward_e2 >= graph.BeginEdges(node_v) &&
forward_e2 < graph.EndEdges(node_v));
const EdgeID reverse_e2 = graph.BeginEdges(node_v) + 1 - reverse_edge_order;
BOOST_ASSERT(SPECIAL_EDGEID != reverse_e2);
BOOST_ASSERT(reverse_e2 >= graph.BeginEdges(node_v) &&
reverse_e2 < graph.EndEdges(node_v));
const EdgeData &fwd_edge_data2 = graph.GetEdgeData(forward_e2);
const EdgeData &rev_edge_data2 = graph.GetEdgeData(reverse_e2);
const NodeID node_w = graph.GetTarget(forward_e2);
BOOST_ASSERT(SPECIAL_NODEID != node_w);
BOOST_ASSERT(node_v != node_w);
const NodeID node_u = graph.GetTarget(reverse_e2);
BOOST_ASSERT(SPECIAL_NODEID != node_u);
BOOST_ASSERT(node_u != node_v);
const EdgeID forward_e1 = graph.FindEdge(node_u, node_v);
BOOST_ASSERT(SPECIAL_EDGEID != forward_e1);
BOOST_ASSERT(node_v == graph.GetTarget(forward_e1));
const EdgeID reverse_e1 = graph.FindEdge(node_w, node_v);
BOOST_ASSERT(SPECIAL_EDGEID != reverse_e1);
BOOST_ASSERT(node_v == graph.GetTarget(reverse_e1));
const EdgeData &fwd_edge_data1 = graph.GetEdgeData(forward_e1);
const EdgeData &rev_edge_data1 = graph.GetEdgeData(reverse_e1);
if (graph.FindEdgeInEitherDirection(node_u, node_w) != SPECIAL_EDGEID)
{
continue;
}
// this case can happen if two ways with different names overlap
if (fwd_edge_data1.name_id != rev_edge_data1.name_id ||
fwd_edge_data2.name_id != rev_edge_data2.name_id)
{
continue;
}
if (fwd_edge_data1.IsCompatibleTo(fwd_edge_data2) && rev_edge_data1.IsCompatibleTo(rev_edge_data2))
{
BOOST_ASSERT(graph.GetEdgeData(forward_e1).name_id ==
graph.GetEdgeData(reverse_e1).name_id);
BOOST_ASSERT(graph.GetEdgeData(forward_e2).name_id ==
graph.GetEdgeData(reverse_e2).name_id);
// Get distances before graph is modified
const int forward_weight1 = graph.GetEdgeData(forward_e1).distance;
const int forward_weight2 = graph.GetEdgeData(forward_e2).distance;
BOOST_ASSERT(0 != forward_weight1);
BOOST_ASSERT(0 != forward_weight2);
const int reverse_weight1 = graph.GetEdgeData(reverse_e1).distance;
const int reverse_weight2 = graph.GetEdgeData(reverse_e2).distance;
BOOST_ASSERT(0 != reverse_weight1);
BOOST_ASSERT(0 != reverse_weight2);
const bool has_node_penalty = traffic_lights.find(node_v) != traffic_lights.end();
// add weight of e2's to e1
graph.GetEdgeData(forward_e1).distance += fwd_edge_data2.distance;
graph.GetEdgeData(reverse_e1).distance += rev_edge_data2.distance;
if (has_node_penalty)
{
graph.GetEdgeData(forward_e1).distance +=
speed_profile.traffic_signal_penalty;
graph.GetEdgeData(reverse_e1).distance +=
speed_profile.traffic_signal_penalty;
}
// extend e1's to targets of e2's
graph.SetTarget(forward_e1, node_w);
graph.SetTarget(reverse_e1, node_u);
// remove e2's (if bidir, otherwise only one)
graph.DeleteEdge(node_v, forward_e2);
graph.DeleteEdge(node_v, reverse_e2);
// update any involved turn restrictions
restriction_map.FixupStartingTurnRestriction(node_u, node_v, node_w);
restriction_map.FixupArrivingTurnRestriction(node_u, node_v, node_w, graph);
restriction_map.FixupStartingTurnRestriction(node_w, node_v, node_u);
restriction_map.FixupArrivingTurnRestriction(node_w, node_v, node_u, graph);
// store compressed geometry in container
geometry_compressor.CompressEdge(
forward_e1, forward_e2, node_v, node_w,
forward_weight1 + (has_node_penalty ? speed_profile.traffic_signal_penalty : 0),
forward_weight2);
geometry_compressor.CompressEdge(
reverse_e1, reverse_e2, node_v, node_u, reverse_weight1,
reverse_weight2 + (has_node_penalty ? speed_profile.traffic_signal_penalty : 0));
}
}
PrintStatistics(original_number_of_nodes, original_number_of_edges, graph);
}
void GraphCompressor::PrintStatistics(unsigned original_number_of_nodes,
unsigned original_number_of_edges,
const NodeBasedDynamicGraph& graph) const
{
unsigned new_node_count = 0;
unsigned new_edge_count = 0;
for (const auto i : osrm::irange(0u, graph.GetNumberOfNodes()))
{
if (graph.GetOutDegree(i) > 0)
{
++new_node_count;
new_edge_count += (graph.EndEdges(i) - graph.BeginEdges(i));
}
}
SimpleLogger().Write() << "Node compression ratio: "
<< new_node_count / (double)original_number_of_nodes;
SimpleLogger().Write() << "Edge compression ratio: "
<< new_edge_count / (double)original_number_of_edges;
}

View File

@ -0,0 +1,62 @@
/*
Copyright (c) 2014, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef GEOMETRY_COMPRESSOR_HPP
#define GEOMETRY_COMPRESSOR_HPP
#include "../typedefs.h"
#include "../contractor/speed_profile.hpp"
#include "../data_structures/node_based_graph.hpp"
#include <memory>
#include <unordered_set>
class CompressedEdgeContainer;
class RestrictionMap;
class GraphCompressor
{
using EdgeData = NodeBasedDynamicGraph::EdgeData;
public:
GraphCompressor(SpeedProfileProperties speed_profile);
void Compress(const std::unordered_set<NodeID>& barrier_nodes,
const std::unordered_set<NodeID>& traffic_lights,
RestrictionMap& restriction_map,
NodeBasedDynamicGraph& graph,
CompressedEdgeContainer& geometry_compressor);
private:
void PrintStatistics(unsigned original_number_of_nodes,
unsigned original_number_of_edges,
const NodeBasedDynamicGraph& graph) const;
SpeedProfileProperties speed_profile;
};
#endif

View File

@ -79,7 +79,7 @@ struct ObjectEncoder
replaceAll(encoded, "-", "+"); replaceAll(encoded, "-", "+");
replaceAll(encoded, "_", "/"); replaceAll(encoded, "_", "/");
std::copy(binary_t(encoded.begin()), binary_t(encoded.begin() + encoded.length() - 1), std::copy(binary_t(encoded.begin()), binary_t(encoded.begin() + encoded.length()),
reinterpret_cast<char *>(&object)); reinterpret_cast<char *>(&object));
} }
catch (...) catch (...)

View File

@ -25,17 +25,14 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#ifndef TINY_COMPONENTS_HPP #ifndef TARJAN_SCC_HPP
#define TINY_COMPONENTS_HPP #define TARJAN_SCC_HPP
#include "../typedefs.h" #include "../typedefs.h"
#include "../data_structures/deallocating_vector.hpp" #include "../data_structures/deallocating_vector.hpp"
#include "../data_structures/import_edge.hpp" #include "../data_structures/import_edge.hpp"
#include "../data_structures/query_node.hpp" #include "../data_structures/query_node.hpp"
#include "../data_structures/percent.hpp" #include "../data_structures/percent.hpp"
#include "../data_structures/restriction.hpp"
#include "../data_structures/restriction_map.hpp"
#include "../data_structures/turn_instructions.hpp"
#include "../util/integer_range.hpp" #include "../util/integer_range.hpp"
#include "../util/simple_logger.hpp" #include "../util/simple_logger.hpp"
@ -43,17 +40,13 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../util/timing_util.hpp" #include "../util/timing_util.hpp"
#include <osrm/coordinate.hpp> #include <osrm/coordinate.hpp>
#include <boost/assert.hpp> #include <boost/assert.hpp>
#include <tbb/parallel_sort.h>
#include <cstdint> #include <cstdint>
#include <memory> #include <memory>
#include <algorithm>
#include <climits>
#include <stack> #include <stack>
#include <unordered_map>
#include <unordered_set>
#include <vector> #include <vector>
template <typename GraphT> class TarjanSCC template <typename GraphT> class TarjanSCC
@ -75,27 +68,21 @@ template <typename GraphT> class TarjanSCC
std::vector<unsigned> components_index; std::vector<unsigned> components_index;
std::vector<NodeID> component_size_vector; std::vector<NodeID> component_size_vector;
std::shared_ptr<GraphT> m_node_based_graph; std::shared_ptr<const GraphT> m_graph;
std::unordered_set<NodeID> barrier_node_set;
RestrictionMap m_restriction_map;
std::size_t size_one_counter; std::size_t size_one_counter;
public: public:
template <class ContainerT> TarjanSCC(std::shared_ptr<const GraphT> graph)
TarjanSCC(std::shared_ptr<GraphT> graph, : components_index(graph->GetNumberOfNodes(), SPECIAL_NODEID), m_graph(graph),
const RestrictionMap &restrictions, size_one_counter(0)
const ContainerT &barrier_node_list)
: components_index(graph->GetNumberOfNodes(), SPECIAL_NODEID), m_node_based_graph(graph),
m_restriction_map(restrictions), size_one_counter(0)
{ {
barrier_node_set.insert(std::begin(barrier_node_list), std::end(barrier_node_list)); BOOST_ASSERT(m_graph->GetNumberOfNodes() > 0);
BOOST_ASSERT(m_node_based_graph->GetNumberOfNodes() > 0);
} }
void run() void run()
{ {
TIMER_START(SCC_RUN); TIMER_START(SCC_RUN);
const NodeID max_node_id = m_node_based_graph->GetNumberOfNodes(); const NodeID max_node_id = m_graph->GetNumberOfNodes();
// The following is a hack to distinguish between stuff that happens // The following is a hack to distinguish between stuff that happens
// before the recursive call and stuff that happens after // before the recursive call and stuff that happens after
@ -140,30 +127,9 @@ template <typename GraphT> class TarjanSCC
tarjan_node_list[v].on_stack = true; tarjan_node_list[v].on_stack = true;
++index; ++index;
const NodeID to_node_of_only_restriction = for (const auto current_edge : m_graph->GetAdjacentEdgeRange(v))
m_restriction_map.CheckForEmanatingIsOnlyTurn(u, v);
for (const auto current_edge : m_node_based_graph->GetAdjacentEdgeRange(v))
{ {
const auto vprime = m_node_based_graph->GetTarget(current_edge); const auto vprime = m_graph->GetTarget(current_edge);
// Traverse outgoing edges
if (barrier_node_set.find(v) != barrier_node_set.end() && u != vprime)
{
continue;
}
if (to_node_of_only_restriction != std::numeric_limits<unsigned>::max() &&
vprime == to_node_of_only_restriction)
{
// At an only_-restriction but not at the right turn
// continue;
}
if (m_restriction_map.CheckIfTurnIsRestricted(u, v, vprime))
{
// continue;
}
if (SPECIAL_NODEID == tarjan_node_list[vprime].index) if (SPECIAL_NODEID == tarjan_node_list[vprime].index)
{ {
@ -182,9 +148,8 @@ template <typename GraphT> class TarjanSCC
else else
{ {
processing_node_before_recursion[v] = true; processing_node_before_recursion[v] = true;
tarjan_node_list[currentFrame.parent].low_link = tarjan_node_list[u].low_link =
std::min(tarjan_node_list[currentFrame.parent].low_link, std::min(tarjan_node_list[u].low_link, tarjan_node_list[v].low_link);
tarjan_node_list[v].low_link);
// after recursion, lets do cycle checking // after recursion, lets do cycle checking
// Check if we found a cycle. This is the bottom part of the recursion // Check if we found a cycle. This is the bottom part of the recursion
if (tarjan_node_list[v].low_link == tarjan_node_list[v].index) if (tarjan_node_list[v].low_link == tarjan_node_list[v].index)
@ -228,12 +193,12 @@ template <typename GraphT> class TarjanSCC
std::size_t get_size_one_count() const { return size_one_counter; } std::size_t get_size_one_count() const { return size_one_counter; }
unsigned get_component_size(const NodeID node) const unsigned get_component_size(const unsigned component_id) const
{ {
return component_size_vector[components_index[node]]; return component_size_vector[component_id];
} }
unsigned get_component_id(const NodeID node) const { return components_index[node]; } unsigned get_component_id(const NodeID node) const { return components_index[node]; }
}; };
#endif /* TINY_COMPONENTS_HPP */ #endif /* TARJAN_SCC_HPP */

View File

@ -0,0 +1,106 @@
/*
Copyright (c) 2015, Project OSRM contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TRIP_BRUTE_FORCE_HPP
#define TRIP_BRUTE_FORCE_HPP
#include "../data_structures/search_engine.hpp"
#include "../util/dist_table_wrapper.hpp"
#include "../util/simple_logger.hpp"
#include <osrm/json_container.hpp>
#include <cstdlib>
#include <algorithm>
#include <string>
#include <iterator>
#include <vector>
#include <limits>
namespace osrm
{
namespace trip
{
// Computes the length of the round trip induced by the given permutation of
// locations. Bails out early (returning a partial sum) as soon as the running
// total reaches min_route_dist, since the caller only keeps routes that beat
// the best one found so far.
EdgeWeight ReturnDistance(const DistTableWrapper<EdgeWeight> &dist_table,
                          const std::vector<NodeID> &location_order,
                          const EdgeWeight min_route_dist,
                          const std::size_t component_size)
{
    EdgeWeight route_dist = 0;
    for (std::size_t pos = 0; pos < location_order.size(); ++pos)
    {
        if (route_dist >= min_route_dist)
        {
            break;
        }
        // The trip is circular: the successor of the last location wraps
        // around to the first one.
        const auto from = location_order[pos];
        const auto to = location_order[(pos + 1) % component_size];
        BOOST_ASSERT_MSG(dist_table(from, to) != INVALID_EDGE_WEIGHT, "invalid route found");
        route_dist += dist_table(from, to);
    }
    return route_dist;
}
// Computes the shortest round trip by enumerating permutations of the
// component's locations and keeping the cheapest one found.
// Factorial complexity -- only feasible for very small components.
//
// start/end            iterator range over the node ids of one component
// number_of_locations  total number of locations (used to validate node ids)
// dist_table           pairwise distance matrix between all locations
template <typename NodeIDIterator>
std::vector<NodeID> BruteForceTrip(const NodeIDIterator start,
                                   const NodeIDIterator end,
                                   const std::size_t number_of_locations,
                                   const DistTableWrapper<EdgeWeight> &dist_table)
{
    const auto component_size = std::distance(start, end);

    std::vector<NodeID> perm(start, end);
    std::vector<NodeID> route;
    route.reserve(component_size);

    EdgeWeight min_route_dist = INVALID_EDGE_WEIGHT;

    BOOST_ASSERT_MSG(perm.size() > 0, "no permutation given");
    BOOST_ASSERT_MSG(*(std::max_element(std::begin(perm), std::end(perm))) < number_of_locations,
                     "invalid node id");
    // NOTE: NodeID is unsigned, so a lower-bound check (>= 0) would be a
    // tautology and trigger -Wtype-limits; the upper-bound check suffices.

    // std::next_permutation only enumerates permutations lexicographically
    // *after* the current ordering, so start from the sorted order to make
    // sure every permutation of the component is actually visited.
    std::sort(std::begin(perm), std::end(perm));

    // The do/while evaluates the initial (sorted) ordering before the first
    // permutation step.
    do
    {
        const auto new_distance = ReturnDistance(dist_table, perm, min_route_dist, component_size);
        // "<=" keeps the lexicographically last permutation among ties.
        if (new_distance <= min_route_dist)
        {
            min_route_dist = new_distance;
            route = perm;
        }
    } while (std::next_permutation(std::begin(perm), std::end(perm)));

    return route;
}
} // end namespace trip
} // end namespace osrm
#endif // TRIP_BRUTE_FORCE_HPP

View File

@ -0,0 +1,215 @@
/*
Copyright (c) 2015, Project OSRM contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TRIP_FARTHEST_INSERTION_HPP
#define TRIP_FARTHEST_INSERTION_HPP
#include "../data_structures/search_engine.hpp"
#include "../util/dist_table_wrapper.hpp"
#include <osrm/json_container.hpp>
#include <boost/assert.hpp>
#include <cstdlib>
#include <algorithm>
#include <string>
#include <vector>
#include <limits>
namespace osrm
{
namespace trip
{
// Given the current round trip and a location that is not yet part of it,
// finds the cheapest place to insert the new location.
// Returns the increase in trip length together with the insertion position
// (an iterator to the element *before* which new_loc should be inserted).
//
// Precondition: route holds at least two nodes and dist_table has valid
// entries between every route node and new_loc (checked by asserts below).
using NodeIDIter = std::vector<NodeID>::iterator;
std::pair<EdgeWeight, NodeIDIter>
GetShortestRoundTrip(const NodeID new_loc,
                     const DistTableWrapper<EdgeWeight> &dist_table,
                     const std::size_t /*number_of_locations*/, // unused; kept for interface compatibility
                     std::vector<NodeID> &route)
{
    auto min_trip_distance = INVALID_EDGE_WEIGHT;
    // Initialized to end() so the returned iterator is never indeterminate,
    // even if the loop below finds no finite insertion (caught by the final
    // assert in debug builds).
    NodeIDIter next_insert_point_candidate = std::end(route);

    // For all edges (from, to) of the current trip, compute the cost delta of
    // detouring via new_loc, and remember the cheapest insertion point.
    const auto start = std::begin(route);
    const auto end = std::end(route);
    for (auto from_node = start; from_node != end; ++from_node)
    {
        // The trip is circular: the successor of the last node is the first.
        auto to_node = std::next(from_node);
        if (to_node == end)
        {
            to_node = start;
        }

        const auto dist_from = dist_table(*from_node, new_loc);
        const auto dist_to = dist_table(new_loc, *to_node);
        // Cost of replacing edge (from,to) with path (from, new_loc, to).
        const auto trip_dist = dist_from + dist_to - dist_table(*from_node, *to_node);

        BOOST_ASSERT_MSG(dist_from != INVALID_EDGE_WEIGHT, "distance has invalid edge weight");
        BOOST_ASSERT_MSG(dist_to != INVALID_EDGE_WEIGHT, "distance has invalid edge weight");
        BOOST_ASSERT_MSG(trip_dist >= 0, "previous trip was not minimal. something's wrong");

        // From all possible insertions, keep the cheapest one.
        if (trip_dist < min_trip_distance)
        {
            min_trip_distance = trip_dist;
            next_insert_point_candidate = to_node;
        }
    }
    BOOST_ASSERT_MSG(min_trip_distance != INVALID_EDGE_WEIGHT, "trip has invalid edge weight");

    return std::make_pair(min_trip_distance, next_insert_point_candidate);
}
template <typename NodeIDIterator>
// Grows a round trip over the component [start, end) with the
// farthest-insertion heuristic, seeded with the two nodes start1/start2
// (chosen by the caller, expected to be the farthest-apart pair):
// repeatedly pick the unvisited location whose cheapest insertion is the
// most expensive, then insert it at its cheapest position.
//
// number_of_locations  total number of locations; node ids index the visited
//                      bitmap, so all ids must be < number_of_locations
// component_size       number of nodes in [start, end)
// dist_table           pairwise distance matrix
std::vector<NodeID> FindRoute(const std::size_t &number_of_locations,
                              const std::size_t &component_size,
                              const NodeIDIterator &start,
                              const NodeIDIterator &end,
                              const DistTableWrapper<EdgeWeight> &dist_table,
                              const NodeID &start1,
                              const NodeID &start2)
{
    BOOST_ASSERT_MSG(number_of_locations >= component_size,
                     "component size bigger than total number of locations");

    std::vector<NodeID> route;
    route.reserve(number_of_locations);

    // tracks which nodes have been already visited
    std::vector<bool> visited(number_of_locations, false);
    visited[start1] = true;
    visited[start2] = true;
    route.push_back(start1);
    route.push_back(start2);

    // add all other nodes missing (two nodes are already in the initial start trip)
    for (std::size_t j = 2; j < component_size; ++j)
    {
        // NOTE(review): both deduce to int (-1 sentinel requires a signed
        // type); assumes EdgeWeight and node ids fit in int -- confirm if
        // those typedefs ever change.
        auto farthest_distance = 0;
        auto next_node = -1;
        NodeIDIter next_insert_point;
        // find unvisited loc i that is the farthest away from all other visited locs
        for (auto i = start; i != end; ++i)
        {
            // find the shortest distance from i to all visited nodes
            if (!visited[*i])
            {
                const auto insert_candidate =
                    GetShortestRoundTrip(*i, dist_table, number_of_locations, route);
                BOOST_ASSERT_MSG(insert_candidate.first != INVALID_EDGE_WEIGHT,
                                 "shortest round trip is invalid");
                // add the location to the current trip such that it results in the shortest total
                // tour
                // ">=" accepts a zero-cost insertion too, so next_node is
                // always set when an unvisited node exists.
                if (insert_candidate.first >= farthest_distance)
                {
                    farthest_distance = insert_candidate.first;
                    next_node = *i;
                    next_insert_point = insert_candidate.second;
                }
            }
        }
        BOOST_ASSERT_MSG(next_node >= 0, "next node to visit is invalid");
        // mark as visited and insert node
        // (the insertion iterator comes from GetShortestRoundTrip called on
        // this same route in this iteration, so it is still valid here)
        visited[next_node] = true;
        route.insert(next_insert_point, next_node);
    }
    return route;
}
template <typename NodeIDIterator>
// Farthest-insertion TSP heuristic entry point:
// 1. seed the tour with the two locations that are farthest apart,
// 2. delegate the incremental tour construction to FindRoute(), which
//    repeatedly inserts the farthest remaining location at its cheapest spot.
std::vector<NodeID> FarthestInsertionTrip(const NodeIDIterator &start,
                                          const NodeIDIterator &end,
                                          const std::size_t number_of_locations,
                                          const DistTableWrapper<EdgeWeight> &dist_table)
{
    const auto component_size = std::distance(start, end);
    BOOST_ASSERT(component_size >= 0);

    // -1 sentinels: must end up as valid node ids below.
    int seed_from = -1;
    int seed_to = -1;

    if (static_cast<std::size_t>(component_size) == number_of_locations)
    {
        // Single component covering every location: the distance table is the
        // full matrix, so the farthest pair is simply its maximum entry.
        const auto index = std::distance(
            std::begin(dist_table), std::max_element(std::begin(dist_table), std::end(dist_table)));
        seed_from = index / number_of_locations;
        seed_to = index % number_of_locations;
    }
    else
    {
        // Restricted component: scan only the pairs inside [start, end).
        auto best_dist = 0;
        for (auto x = start; x != end; ++x)
        {
            for (auto y = start; y != end; ++y)
            {
                const auto candidate = dist_table(*x, *y);
                if (candidate > best_dist)
                {
                    best_dist = candidate;
                    seed_from = *x;
                    seed_to = *y;
                }
            }
        }
    }

    BOOST_ASSERT(seed_from >= 0);
    BOOST_ASSERT(seed_to >= 0);
    BOOST_ASSERT_MSG(static_cast<std::size_t>(seed_from) < number_of_locations, "start node");
    BOOST_ASSERT_MSG(static_cast<std::size_t>(seed_to) < number_of_locations, "start node");

    return FindRoute(number_of_locations, component_size, start, end, dist_table, seed_from, seed_to);
}
} // end namespace trip
} // end namespace osrm
#endif // TRIP_FARTHEST_INSERTION_HPP

View File

@ -0,0 +1,122 @@
/*
Copyright (c) 2015, Project OSRM contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TRIP_NEAREST_NEIGHBOUR_HPP
#define TRIP_NEAREST_NEIGHBOUR_HPP
#include "../data_structures/search_engine.hpp"
#include "../util/simple_logger.hpp"
#include "../util/dist_table_wrapper.hpp"
#include <osrm/json_container.hpp>
#include <cstdlib>
#include <algorithm>
#include <string>
#include <vector>
#include <limits>
namespace osrm
{
namespace trip
{
template <typename NodeIDIterator>
// Greedy nearest-neighbour TSP heuristic:
// 1. pick a starting location
// 2. repeatedly move to the nearest unvisited location, marking it visited
// 3. repeat 1-2 with every location of the component as the starting point
//    and keep the iteration that produced the shortest trip
//
// start/end            iterator range over the node ids of one component
// number_of_locations  total number of locations; node ids index the visited
//                      bitmap, so all ids must be < number_of_locations
// dist_table           pairwise distance matrix
std::vector<NodeID> NearestNeighbourTrip(const NodeIDIterator &start,
                                         const NodeIDIterator &end,
                                         const std::size_t number_of_locations,
                                         const DistTableWrapper<EdgeWeight> &dist_table)
{
    std::vector<NodeID> route;
    route.reserve(number_of_locations);

    // Fix: keep the component size unsigned so the loop comparison below does
    // not mix signed and unsigned types (std::distance returns a signed type).
    const auto component_size = static_cast<std::size_t>(std::distance(start, end));

    auto shortest_trip_distance = INVALID_EDGE_WEIGHT;

    // Try every node of the component as the starting point; keep the best tour.
    for (auto start_node = start; start_node != end; ++start_node)
    {
        NodeID curr_node = *start_node;

        std::vector<NodeID> curr_route;
        curr_route.reserve(component_size);
        curr_route.push_back(*start_node);

        // visited[i] indicates whether node i was already visited by the salesman
        std::vector<bool> visited(number_of_locations, false);
        visited[*start_node] = true;

        // Greedily extend the tour with the nearest unvisited neighbour.
        // NOTE(review): trip_dist does not include the closing edge back to
        // the start node, so tours are compared on open-path length --
        // confirm this is intended.
        EdgeWeight trip_dist = 0;
        for (std::size_t via_point = 1; via_point < component_size; ++via_point)
        {
            EdgeWeight min_dist = INVALID_EDGE_WEIGHT;
            NodeID min_id = SPECIAL_NODEID;

            // Find the nearest unvisited neighbour of the current node.
            for (auto next = start; next != end; ++next)
            {
                const auto curr_dist = dist_table(curr_node, *next);
                BOOST_ASSERT_MSG(curr_dist != INVALID_EDGE_WEIGHT, "invalid distance found");
                if (!visited[*next] && curr_dist < min_dist)
                {
                    min_dist = curr_dist;
                    min_id = *next;
                }
            }

            BOOST_ASSERT_MSG(min_id != SPECIAL_NODEID, "no next node found");

            visited[min_id] = true;
            curr_route.push_back(min_id);
            trip_dist += min_dist;
            curr_node = min_id;
        }

        // Keep this tour if it beats the best one found so far.
        if (trip_dist < shortest_trip_distance)
        {
            shortest_trip_distance = trip_dist;
            route = std::move(curr_route);
        }
    }
    return route;
}
} // end namespace trip
} // end namespace osrm
#endif // TRIP_NEAREST_NEIGHBOUR_HPP

View File

@ -0,0 +1,64 @@
/*
Copyright (c) 2015, Project OSRM contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Fix: this header previously reused the guard TRIP_BRUTE_FORCE_HPP from
// trip_brute_force.hpp, so whenever that header was included first this whole
// file silently compiled to nothing. Give it its own guard.
#ifndef TRIP_TABU_SEARCH_HPP
#define TRIP_TABU_SEARCH_HPP

#include "../data_structures/search_engine.hpp"
#include "../util/simple_logger.hpp"

#include <osrm/json_container.hpp>

#include <cstdlib>
#include <algorithm>
#include <string>
#include <vector>
#include <limits>

namespace osrm
{
namespace trip
{

// todo: yet to be implemented
// Placeholder for a tabu-search TSP heuristic; parameter names are commented
// out to avoid -Wunused-parameter until the implementation lands.
void TabuSearchTrip(std::vector<unsigned> & /*location*/,
                    const PhantomNodeArray & /*phantom_node_vector*/,
                    const std::vector<EdgeWeight> & /*dist_table*/,
                    InternalRouteResult & /*min_route*/,
                    std::vector<int> & /*min_loc_permutation*/)
{
}

// todo: yet to be implemented
void TabuSearchTrip(const PhantomNodeArray & /*phantom_node_vector*/,
                    const std::vector<EdgeWeight> & /*dist_table*/,
                    InternalRouteResult & /*min_route*/,
                    std::vector<int> & /*min_loc_permutation*/)
{
}

} // end namespace trip
} // end namespace osrm
#endif // TRIP_TABU_SEARCH_HPP

View File

@ -2,33 +2,47 @@
SETLOCAL SETLOCAL
SET EL=0 SET EL=0
ECHO ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ %~f0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
SET PROJECT_DIR=%CD%
ECHO PROJECT_DIR^: %PROJECT_DIR%
ECHO activating VS command prompt ...
SET PATH=C:\Program Files (x86)\MSBuild\14.0\Bin;%PATH%
CALL "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd64
ECHO platform^: %platform% ECHO platform^: %platform%
:: HARDCODE "x64" as it is uppercase on AppVeyor and download from S3 is case sensitive :: HARDCODE "x64" as it is uppercase on AppVeyor and download from S3 is case sensitive
SET DEPSPKG=osrm-deps-win-x64-14.0.7z SET DEPSPKG=osrm-deps-win-x64-14.0.7z
:: local development :: local development
IF "%computername%"=="MB" GOTO SKIPDL ECHO LOCAL_DEV^: %LOCAL_DEV%
IF NOT DEFINED LOCAL_DEV SET LOCAL_DEV=0
IF DEFINED LOCAL_DEV IF %LOCAL_DEV% EQU 1 IF EXIST %DEPSPKG% ECHO skipping deps download && GOTO SKIPDL
IF EXIST %DEPSPKG% DEL %DEPSPKG% IF EXIST %DEPSPKG% DEL %DEPSPKG%
IF %ERRORLEVEL% NEQ 0 GOTO ERROR IF %ERRORLEVEL% NEQ 0 GOTO ERROR
ECHO downloading %DEPSPKG% ECHO downloading %DEPSPKG%
powershell Invoke-WebRequest https://mapbox.s3.amazonaws.com/windows-builds/windows-build-deps/$env:DEPSPKG -OutFile C:\projects\osrm\$env:DEPSPKG powershell Invoke-WebRequest https://mapbox.s3.amazonaws.com/windows-builds/windows-build-deps/$env:DEPSPKG -OutFile $env:PROJECT_DIR\$env:DEPSPKG
IF %ERRORLEVEL% NEQ 0 GOTO ERROR
:SKIPDL
IF EXIST osrm-deps ECHO deleting osrm-deps... && RD /S /Q osrm-deps
IF %ERRORLEVEL% NEQ 0 GOTO ERROR
IF EXIST build ECHO deletings build dir... && RD /S /Q build
IF %ERRORLEVEL% NEQ 0 GOTO ERROR IF %ERRORLEVEL% NEQ 0 GOTO ERROR
7z -y x %DEPSPKG% | %windir%\system32\FIND "ing archive" 7z -y x %DEPSPKG% | %windir%\system32\FIND "ing archive"
IF %ERRORLEVEL% NEQ 0 GOTO ERROR IF %ERRORLEVEL% NEQ 0 GOTO ERROR
:SKIPDL MKDIR build
IF EXIST build rd /s /q build
IF %ERRORLEVEL% NEQ 0 GOTO ERROR
mkdir build
IF %ERRORLEVEL% NEQ 0 GOTO ERROR IF %ERRORLEVEL% NEQ 0 GOTO ERROR
cd build cd build
IF %ERRORLEVEL% NEQ 0 GOTO ERROR IF %ERRORLEVEL% NEQ 0 GOTO ERROR
SET OSRMDEPSDIR=c:\projects\osrm\osrm-deps SET OSRMDEPSDIR=%PROJECT_DIR%\osrm-deps
set PREFIX=%OSRMDEPSDIR%/libs set PREFIX=%OSRMDEPSDIR%/libs
set BOOST_ROOT=%OSRMDEPSDIR%/boost set BOOST_ROOT=%OSRMDEPSDIR%/boost
set TBB_INSTALL_DIR=%OSRMDEPSDIR%/tbb set TBB_INSTALL_DIR=%OSRMDEPSDIR%/tbb
@ -38,7 +52,7 @@ ECHO calling cmake ....
cmake .. ^ cmake .. ^
-G "Visual Studio 14 Win64" ^ -G "Visual Studio 14 Win64" ^
-DBOOST_ROOT=%BOOST_ROOT% ^ -DBOOST_ROOT=%BOOST_ROOT% ^
-DBoost_ADDITIONAL_VERSIONS=1.57 ^ -DBoost_ADDITIONAL_VERSIONS=1.58 ^
-DBoost_USE_MULTITHREADED=ON ^ -DBoost_USE_MULTITHREADED=ON ^
-DBoost_USE_STATIC_LIBS=ON ^ -DBoost_USE_STATIC_LIBS=ON ^
-DCMAKE_BUILD_TYPE=%CONFIGURATION% ^ -DCMAKE_BUILD_TYPE=%CONFIGURATION% ^
@ -60,54 +74,47 @@ msbuild OSRM.sln ^
/flp2:logfile=build_warnings.txt;warningsonly /flp2:logfile=build_warnings.txt;warningsonly
IF %ERRORLEVEL% NEQ 0 GOTO ERROR IF %ERRORLEVEL% NEQ 0 GOTO ERROR
ECHO ========= TODO^: CREATE PACKAGES ========== CD %PROJECT_DIR%\build
CD c:\projects\osrm\build\%Configuration%
IF %ERRORLEVEL% NEQ 0 GOTO ERROR IF %ERRORLEVEL% NEQ 0 GOTO ERROR
SET PATH=c:\projects\osrm\osrm-deps\libs\bin;%PATH% SET PATH=%PROJECT_DIR%\osrm-deps\libs\bin;%PATH%
ECHO running datastructure-tests.exe ... ECHO running datastructure-tests.exe ...
datastructure-tests.exe %Configuration%\datastructure-tests.exe
IF %ERRORLEVEL% NEQ 0 GOTO ERROR IF %ERRORLEVEL% NEQ 0 GOTO ERROR
ECHO running algorithm-tests.exe ... ECHO running algorithm-tests.exe ...
algorithm-tests.exe %Configuration%\algorithm-tests.exe
IF %ERRORLEVEL% NEQ 0 GOTO ERROR
IF NOT "%APPVEYOR_REPO_BRANCH%"=="develop" GOTO DONE
ECHO ========= CREATING PACKAGES ==========
CD %PROJECT_DIR%\build\%Configuration%
IF %ERRORLEVEL% NEQ 0 GOTO ERROR
SET P=%PROJECT_DIR%
SET ZIP= %P%\osrm_%Configuration%.zip
IF EXIST %ZIP% ECHO deleting %ZIP% && DEL /F /Q %ZIP%
IF %ERRORLEVEL% NEQ 0 ECHO deleting %ZIP% FAILED && GOTO ERROR
7z a %ZIP% *.lib *.exe *.pdb %P%/osrm-deps/libs/bin/*.dll -tzip -mx9 | %windir%\system32\FIND "ing archive"
IF %ERRORLEVEL% NEQ 0 GOTO ERROR
CD ..\..\profiles
IF %ERRORLEVEL% NEQ 0 GOTO ERROR
ECHO disk=c:\temp\stxxl,10000,wincall > .stxxl.txt
7z a %ZIP% * -tzip -mx9 | %windir%\system32\FIND "ing archive"
IF %ERRORLEVEL% NEQ 0 GOTO ERROR IF %ERRORLEVEL% NEQ 0 GOTO ERROR
GOTO DONE GOTO DONE
:ERROR :ERROR
ECHO ~~~~~~~~~~~~~~~~~~~~~~ ERROR %~f0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ECHO ERRORLEVEL^: %ERRORLEVEL%
SET EL=%ERRORLEVEL% SET EL=%ERRORLEVEL%
ECHO ============== ERROR ===============
:DONE :DONE
ECHO ============= DONE =============== ECHO ~~~~~~~~~~~~~~~~~~~~~~ DONE %~f0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
CD C:\projects\osrm
EXIT /b %EL% EXIT /b %EL%
- cd c:/projects/osrm
- mkdir build
- cd build
- echo Running cmake...
- call "%VS120COMNTOOLS%\..\..\VC\vcvarsall.bat" x86_amd64
- SET PATH=C:\Program Files (x86)\MSBuild\12.0\bin\;%PATH%
- SET P=c:/projects/osrm
- set TBB_INSTALL_DIR=%P%/tbb
- set TBB_ARCH_PLATFORM=intel64/vc12
- cmake .. -G "Visual Studio 14 Win64" -DCMAKE_BUILD_TYPE=%Configuration% -DCMAKE_INSTALL_PREFIX=%P%/libs -DBOOST_ROOT=%P%/boost_min -DBoost_ADDITIONAL_VERSIONS=1.57 -DBoost_USE_STATIC_LIBS=ON
- SET PLATFORM_TOOLSET=v140
- SET TOOLS_VERSION=14.0
- msbuild /p:Platform=x64 /clp:Verbosity=minimal /toolsversion:%TOOLS_VERSION% /p:PlatformToolset=%PLATFORM_TOOLSET% /nologo OSRM.sln
- msbuild /p:Platform=x64 /clp:Verbosity=minimal /toolsversion:%TOOLS_VERSION% /p:PlatformToolset=%PLATFORM_TOOLSET% /nologo tests.vcxproj
- cd %Configuration%
- if "%APPVEYOR_REPO_BRANCH%"=="develop" (7z a %P%/osrm_%Configuration%.zip *.exe *.pdb %P%/libs/bin/*.dll -tzip)
- cd ..\..\profiles
- echo disk=c:\temp\stxxl,10000,wincall > .stxxl.txt
- if "%APPVEYOR_REPO_BRANCH%"=="develop" (7z a %P%/osrm_%Configuration%.zip * -tzip)
- set PATH=%PATH%;c:/projects/osrm/libs/bin
- cd c:/projects/osrm/build/%Configuration%
- datastructure-tests.exe
- algorithm-tests.exe

View File

@ -7,17 +7,13 @@ environment:
init: init:
- git config --global core.autocrlf input - git config --global core.autocrlf input
os: Visual Studio 2015 RC os: Visual Studio 2015
# clone directory # clone directory
clone_folder: c:\projects\osrm clone_folder: c:\projects\osrm
platform: x64 platform: x64
install:
- set PATH=C:\Program Files (x86)\MSBuild\14.0\Bin;%PATH%
- CALL "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd64
build_script: build_script:
- CALL appveyor-build.bat - CALL appveyor-build.bat

View File

@ -1,19 +1,33 @@
@ECHO OFF @ECHO OFF
SETLOCAL
SET EL=0
ECHO ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ %~f0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
SET PLATFORM=x64 SET PLATFORM=x64
SET CONFIGURATION=Release SET CONFIGURATION=Release
::SET LOCAL_DEV=1
WHERE msbuild FOR /F "tokens=*" %%i in ('git rev-parse --abbrev-ref HEAD') do SET APPVEYOR_REPO_BRANCH=%%i
IF %ERRORLEVEL% EQU 0 GOTO RUNBUILD ECHO APPVEYOR_REPO_BRANCH^: %APPVEYOR_REPO_BRANCH%
SET PATH=C:\mb\windows-builds-64\tmp-bin\cmake-3.1.0-win32-x86\bin;%PATH% SET PATH=C:\mb\windows-builds-64\tmp-bin\cmake-3.1.0-win32-x86\bin;%PATH%
SET PATH=C:\Program Files\7-Zip;%PATH% SET PATH=C:\Program Files\7-Zip;%PATH%
ECHO activating VS command prompt ...
SET PATH=C:\Program Files (x86)\MSBuild\14.0\Bin;%PATH%
CALL "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd64
:RUNBUILD
powershell Set-ExecutionPolicy -Scope CurrentUser -ExecutionPolicy Unrestricted -Force powershell Set-ExecutionPolicy -Scope CurrentUser -ExecutionPolicy Unrestricted -Force
IF %ERRORLEVEL% NEQ 0 GOTO ERROR
CALL appveyor-build.bat CALL appveyor-build.bat
EXIT /b %ERRORLEVEL% IF %ERRORLEVEL% NEQ 0 GOTO ERROR
GOTO DONE
:ERROR
ECHO ~~~~~~~~~~~~~~~~~~~~~~ ERROR %~f0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ECHO ERRORLEVEL^: %ERRORLEVEL%
SET EL=%ERRORLEVEL%
:DONE
ECHO ~~~~~~~~~~~~~~~~~~~~~~ DONE %~f0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
EXIT /b %EL%

View File

@ -34,7 +34,7 @@ SET(CPACK_DEBIAN_PACKAGE_SECTION "devel")
SET(CPACK_DEBIAN_PACKAGE_DESCRIPTION "Open Source Routing Machine (OSRM) is a high-performance routing engine. SET(CPACK_DEBIAN_PACKAGE_DESCRIPTION "Open Source Routing Machine (OSRM) is a high-performance routing engine.
It combines sophisticated routing algorithms with the open and free data of the OpenStreetMap." It combines sophisticated routing algorithms with the open and free data of the OpenStreetMap."
) )
SET(CPACK_DEBIAN_PACKAGE_DEPENDS "libc6-dev, libprotobuf-dev, libosmpbf-dev, libbz2-1.0, libstxxl1, libxml2, libzip2, liblua5.1-0, libtbb2, libboost-all-dev") SET(CPACK_DEBIAN_PACKAGE_DEPENDS "libc6-dev, libbz2-1.0, libstxxl1, libxml2, libzip2, liblua5.1-0, libtbb2, libboost-all-dev")
file(GLOB_RECURSE ProfileGlob ${CMAKE_SOURCE_DIR}/profiles/*) file(GLOB_RECURSE ProfileGlob ${CMAKE_SOURCE_DIR}/profiles/*)
install(FILES ${ProfileGlob} DESTINATION "share/doc/${LOWER_PROJECT_NAME}/profiles") install(FILES ${ProfileGlob} DESTINATION "share/doc/${LOWER_PROJECT_NAME}/profiles")

View File

@ -1,54 +0,0 @@
# Locate OSMPBF library
# This module defines
# OSMPBF_FOUND, if false, do not try to link to OSMPBF
# OSMPBF_LIBRARIES
# OSMPBF_INCLUDE_DIR, where to find OSMPBF.hpp
#
# Note that the expected include convention is
# #include <osmpbf/osmpbf.h>
# and not
# #include <osmpbf.h>
IF( NOT OSMPBF_FIND_QUIETLY )
MESSAGE(STATUS "Looking for OSMPBF...")
ENDIF()
FIND_PATH(OSMPBF_INCLUDE_DIR osmpbf.h
HINTS
$ENV{OSMPBF_DIR}
PATH_SUFFIXES OSMPBF include/osmpbf include
PATHS
~/Library/Frameworks
/Library/Frameworks
/usr/local
/usr
/opt/local # DarwinPorts
/opt
)
FIND_LIBRARY(OSMPBF_LIBRARY
NAMES osmpbf
HINTS
$ENV{OSMPBF_DIR}
PATH_SUFFIXES lib64 lib
PATHS
~/Library/Frameworks
/Library/Frameworks
/usr/local
/usr
/opt/local
/opt
)
INCLUDE(FindPackageHandleStandardArgs)
# handle the QUIETLY and REQUIRED arguments and set OSMPBF_FOUND to TRUE if
# all listed variables are TRUE
FIND_PACKAGE_HANDLE_STANDARD_ARGS(OSMPBF DEFAULT_MSG OSMPBF_LIBRARY OSMPBF_INCLUDE_DIR)
IF( NOT OSMPBF_FIND_QUIETLY )
IF( OSMPBF_FOUND )
MESSAGE(STATUS "Found OSMPBF: ${OSMPBF_LIBRARY}" )
ENDIF()
ENDIF()
#MARK_AS_ADVANCED(OSMPBF_INCLUDE_DIR OSMPBF_LIBRARIES OSMPBF_LIBRARY OSMPBF_LIBRARY_DBG)

View File

@ -1,10 +1,24 @@
set(OLDFILE ${SOURCE_DIR}/util/fingerprint_impl.hpp) set(OLDFILE ${SOURCE_DIR}/util/fingerprint_impl.hpp)
if (EXISTS ${OLDFILE}) set(NEWFILE ${OLDFILE}.tmp)
file(REMOVE_RECURSE ${OLDFILE}) set(INFILE ${OLDFILE}.in)
endif()
file(MD5 ${SOURCE_DIR}/prepare.cpp MD5PREPARE) file(MD5 ${SOURCE_DIR}/prepare.cpp MD5PREPARE)
file(MD5 ${SOURCE_DIR}/data_structures/static_rtree.hpp MD5RTREE) file(MD5 ${SOURCE_DIR}/data_structures/static_rtree.hpp MD5RTREE)
file(MD5 ${SOURCE_DIR}/util/graph_loader.hpp MD5GRAPH) file(MD5 ${SOURCE_DIR}/util/graph_loader.hpp MD5GRAPH)
file(MD5 ${SOURCE_DIR}/server/data_structures/internal_datafacade.hpp MD5OBJECTS) file(MD5 ${SOURCE_DIR}/server/data_structures/internal_datafacade.hpp MD5OBJECTS)
CONFIGURE_FILE(${SOURCE_DIR}/util/fingerprint_impl.hpp.in ${SOURCE_DIR}/util/fingerprint_impl.hpp) CONFIGURE_FILE(${INFILE} ${NEWFILE})
file(MD5 ${NEWFILE} MD5NEW)
if (EXISTS ${OLDFILE})
file(MD5 ${OLDFILE} MD5OLD)
if(NOT ${MD5NEW} STREQUAL ${MD5OLD})
file(REMOVE_RECURSE ${OLDFILE})
file(RENAME ${NEWFILE} ${OLDFILE})
else()
file(REMOVE_RECURSE ${NEWFILE})
message(STATUS "Fingerprint unchanged, not regenerating")
endif()
else()
file(RENAME ${NEWFILE} ${OLDFILE})
endif()

View File

@ -284,7 +284,7 @@ class Contractor
~Contractor() {} ~Contractor() {}
void Run() void Run( double core_factor = 1.0 )
{ {
// for the preperation we can use a big grain size, which is much faster (probably cache) // for the preperation we can use a big grain size, which is much faster (probably cache)
constexpr size_t InitGrainSize = 100000; constexpr size_t InitGrainSize = 100000;
@ -306,12 +306,13 @@ class Contractor
std::vector<RemainingNodeData> remaining_nodes(number_of_nodes); std::vector<RemainingNodeData> remaining_nodes(number_of_nodes);
std::vector<float> node_priorities(number_of_nodes); std::vector<float> node_priorities(number_of_nodes);
std::vector<NodePriorityData> node_data(number_of_nodes); std::vector<NodePriorityData> node_data(number_of_nodes);
is_core_node.resize(number_of_nodes, false);
// initialize priorities in parallel // initialize priorities in parallel
tbb::parallel_for(tbb::blocked_range<int>(0, number_of_nodes, InitGrainSize), tbb::parallel_for(tbb::blocked_range<int>(0, number_of_nodes, InitGrainSize),
[&remaining_nodes](const tbb::blocked_range<int> &range) [&remaining_nodes](const tbb::blocked_range<int> &range)
{ {
for (int x = range.begin(); x != range.end(); ++x) for (int x = range.begin(), end = range.end(); x != end; ++x)
{ {
remaining_nodes[x].id = x; remaining_nodes[x].id = x;
} }
@ -323,7 +324,7 @@ class Contractor
const tbb::blocked_range<int> &range) const tbb::blocked_range<int> &range)
{ {
ContractorThreadData *data = thread_data_list.getThreadData(); ContractorThreadData *data = thread_data_list.getThreadData();
for (int x = range.begin(); x != range.end(); ++x) for (int x = range.begin(), end = range.end(); x != end; ++x)
{ {
node_priorities[x] = node_priorities[x] =
this->EvaluateNodePriority(data, &node_data[x], x); this->EvaluateNodePriority(data, &node_data[x], x);
@ -333,9 +334,9 @@ class Contractor
<< std::flush; << std::flush;
bool flushed_contractor = false; bool flushed_contractor = false;
while (number_of_nodes > 2 && number_of_contracted_nodes < number_of_nodes) while (number_of_nodes > 2 && number_of_contracted_nodes < static_cast<NodeID>(number_of_nodes * core_factor) )
{ {
if (!flushed_contractor && (number_of_contracted_nodes > (number_of_nodes * 0.65))) if (!flushed_contractor && (number_of_contracted_nodes > static_cast<NodeID>(number_of_nodes * 0.65 * core_factor)))
{ {
DeallocatingVector<ContractorEdge> new_edge_set; // this one is not explicitely DeallocatingVector<ContractorEdge> new_edge_set; // this one is not explicitely
// cleared since it goes out of // cleared since it goes out of
@ -349,7 +350,7 @@ class Contractor
std::vector<float> new_node_priority(remaining_nodes.size()); std::vector<float> new_node_priority(remaining_nodes.size());
// this map gives the old IDs from the new ones, necessary to get a consistent graph // this map gives the old IDs from the new ones, necessary to get a consistent graph
// at the end of contraction // at the end of contraction
orig_node_id_to_new_id_map.resize(remaining_nodes.size()); orig_node_id_from_new_node_id_map.resize(remaining_nodes.size());
// this map gives the new IDs from the old ones, necessary to remap targets from the // this map gives the new IDs from the old ones, necessary to remap targets from the
// remaining graph // remaining graph
std::vector<NodeID> new_node_id_from_orig_id_map(number_of_nodes, UINT_MAX); std::vector<NodeID> new_node_id_from_orig_id_map(number_of_nodes, UINT_MAX);
@ -359,7 +360,7 @@ class Contractor
for (const auto new_node_id : osrm::irange<std::size_t>(0, remaining_nodes.size())) for (const auto new_node_id : osrm::irange<std::size_t>(0, remaining_nodes.size()))
{ {
// create renumbering maps in both directions // create renumbering maps in both directions
orig_node_id_to_new_id_map[new_node_id] = remaining_nodes[new_node_id].id; orig_node_id_from_new_node_id_map[new_node_id] = remaining_nodes[new_node_id].id;
new_node_id_from_orig_id_map[remaining_nodes[new_node_id].id] = new_node_id; new_node_id_from_orig_id_map[remaining_nodes[new_node_id].id] = new_node_id;
new_node_priority[new_node_id] = new_node_priority[new_node_id] =
node_priorities[remaining_nodes[new_node_id].id]; node_priorities[remaining_nodes[new_node_id].id];
@ -429,7 +430,7 @@ class Contractor
{ {
ContractorThreadData *data = thread_data_list.getThreadData(); ContractorThreadData *data = thread_data_list.getThreadData();
// determine independent node set // determine independent node set
for (int i = range.begin(); i != range.end(); ++i) for (int i = range.begin(), end = range.end(); i != end; ++i)
{ {
const NodeID node = remaining_nodes[i].id; const NodeID node = remaining_nodes[i].id;
remaining_nodes[i].is_independent = remaining_nodes[i].is_independent =
@ -450,7 +451,7 @@ class Contractor
[this, &remaining_nodes, &thread_data_list](const tbb::blocked_range<int> &range) [this, &remaining_nodes, &thread_data_list](const tbb::blocked_range<int> &range)
{ {
ContractorThreadData *data = thread_data_list.getThreadData(); ContractorThreadData *data = thread_data_list.getThreadData();
for (int position = range.begin(); position != range.end(); ++position) for (int position = range.begin(), end = range.end(); position != end; ++position)
{ {
const NodeID x = remaining_nodes[position].id; const NodeID x = remaining_nodes[position].id;
this->ContractNode<false>(data, x); this->ContractNode<false>(data, x);
@ -469,7 +470,7 @@ class Contractor
[this, &remaining_nodes, &thread_data_list](const tbb::blocked_range<int> &range) [this, &remaining_nodes, &thread_data_list](const tbb::blocked_range<int> &range)
{ {
ContractorThreadData *data = thread_data_list.getThreadData(); ContractorThreadData *data = thread_data_list.getThreadData();
for (int position = range.begin(); position != range.end(); ++position) for (int position = range.begin(), end = range.end(); position != end; ++position)
{ {
const NodeID x = remaining_nodes[position].id; const NodeID x = remaining_nodes[position].id;
this->DeleteIncomingEdges(data, x); this->DeleteIncomingEdges(data, x);
@ -507,7 +508,7 @@ class Contractor
const tbb::blocked_range<int> &range) const tbb::blocked_range<int> &range)
{ {
ContractorThreadData *data = thread_data_list.getThreadData(); ContractorThreadData *data = thread_data_list.getThreadData();
for (int position = range.begin(); position != range.end(); ++position) for (int position = range.begin(), end = range.end(); position != end; ++position)
{ {
NodeID x = remaining_nodes[position].id; NodeID x = remaining_nodes[position].id;
this->UpdateNodeNeighbours(node_priorities, node_data, data, x); this->UpdateNodeNeighbours(node_priorities, node_data, data, x);
@ -524,7 +525,7 @@ class Contractor
// unsigned quaddegree = 0; // unsigned quaddegree = 0;
// //
// for(unsigned i = 0; i < remaining_nodes.size(); ++i) { // for(unsigned i = 0; i < remaining_nodes.size(); ++i) {
// unsigned degree = contractor_graph->EndEdges(remaining_nodes[i].first) // unsigned degree = contractor_graph->EndEdges(remaining_nodes[i].id)
// - // -
// contractor_graph->BeginEdges(remaining_nodes[i].first); // contractor_graph->BeginEdges(remaining_nodes[i].first);
// if(degree > maxdegree) // if(degree > maxdegree)
@ -546,9 +547,32 @@ class Contractor
p.printStatus(number_of_contracted_nodes); p.printStatus(number_of_contracted_nodes);
} }
if (remaining_nodes.size() > 2)
{
// TODO: for small cores a sorted array of core ids might also work good
for (const auto& node : remaining_nodes)
{
auto orig_id = orig_node_id_from_new_node_id_map[node.id];
is_core_node[orig_id] = true;
}
}
else
{
// in this case we don't need core markers since we fully contracted
// the graph
is_core_node.clear();
}
SimpleLogger().Write() << "[core] " << remaining_nodes.size() << " nodes " << contractor_graph->GetNumberOfEdges() << " edges." << std::endl;
thread_data_list.data.clear(); thread_data_list.data.clear();
} }
inline void GetCoreMarker(std::vector<bool> &out_is_core_node)
{
out_is_core_node.swap(is_core_node);
}
template <class Edge> inline void GetEdges(DeallocatingVector<Edge> &edges) template <class Edge> inline void GetEdges(DeallocatingVector<Edge> &edges)
{ {
Percent p(contractor_graph->GetNumberOfNodes()); Percent p(contractor_graph->GetNumberOfNodes());
@ -564,10 +588,10 @@ class Contractor
{ {
const NodeID target = contractor_graph->GetTarget(edge); const NodeID target = contractor_graph->GetTarget(edge);
const ContractorGraph::EdgeData &data = contractor_graph->GetEdgeData(edge); const ContractorGraph::EdgeData &data = contractor_graph->GetEdgeData(edge);
if (!orig_node_id_to_new_id_map.empty()) if (!orig_node_id_from_new_node_id_map.empty())
{ {
new_edge.source = orig_node_id_to_new_id_map[node]; new_edge.source = orig_node_id_from_new_node_id_map[node];
new_edge.target = orig_node_id_to_new_id_map[target]; new_edge.target = orig_node_id_from_new_node_id_map[target];
} }
else else
{ {
@ -578,9 +602,10 @@ class Contractor
BOOST_ASSERT_MSG(UINT_MAX != new_edge.target, "Target id invalid"); BOOST_ASSERT_MSG(UINT_MAX != new_edge.target, "Target id invalid");
new_edge.data.distance = data.distance; new_edge.data.distance = data.distance;
new_edge.data.shortcut = data.shortcut; new_edge.data.shortcut = data.shortcut;
if (!data.is_original_via_node_ID && !orig_node_id_to_new_id_map.empty()) if (!data.is_original_via_node_ID && !orig_node_id_from_new_node_id_map.empty())
{ {
new_edge.data.id = orig_node_id_to_new_id_map[data.id]; // tranlate the _node id_ of the shortcutted node
new_edge.data.id = orig_node_id_from_new_node_id_map[data.id];
} }
else else
{ {
@ -595,10 +620,10 @@ class Contractor
} }
} }
contractor_graph.reset(); contractor_graph.reset();
orig_node_id_to_new_id_map.clear(); orig_node_id_from_new_node_id_map.clear();
orig_node_id_to_new_id_map.shrink_to_fit(); orig_node_id_from_new_node_id_map.shrink_to_fit();
BOOST_ASSERT(0 == orig_node_id_to_new_id_map.capacity()); BOOST_ASSERT(0 == orig_node_id_from_new_node_id_map.capacity());
edges.append(external_edge_list.begin(), external_edge_list.end()); edges.append(external_edge_list.begin(), external_edge_list.end());
external_edge_list.clear(); external_edge_list.clear();
@ -956,7 +981,8 @@ class Contractor
std::shared_ptr<ContractorGraph> contractor_graph; std::shared_ptr<ContractorGraph> contractor_graph;
stxxl::vector<QueryEdge> external_edge_list; stxxl::vector<QueryEdge> external_edge_list;
std::vector<NodeID> orig_node_id_to_new_id_map; std::vector<NodeID> orig_node_id_from_new_node_id_map;
std::vector<bool> is_core_node;
XORFastHash fast_hash; XORFastHash fast_hash;
}; };

View File

@ -56,7 +56,11 @@ ContractorOptions::ParseArguments(int argc, char *argv[], ContractorConfig &cont
"Path to LUA routing profile")( "Path to LUA routing profile")(
"threads,t", boost::program_options::value<unsigned int>(&contractor_config.requested_num_threads) "threads,t", boost::program_options::value<unsigned int>(&contractor_config.requested_num_threads)
->default_value(tbb::task_scheduler_init::default_num_threads()), ->default_value(tbb::task_scheduler_init::default_num_threads()),
"Number of threads to use"); "Number of threads to use")(
"core,k", boost::program_options::value<double>(&contractor_config.core_factor)
->default_value(1.0),"Percentage of the graph (in vertices) to contract [0.1]");
// hidden options, will be allowed both on command line and in config file, but will not be // hidden options, will be allowed both on command line and in config file, but will not be
// shown to the user // shown to the user
@ -127,6 +131,7 @@ ContractorOptions::ParseArguments(int argc, char *argv[], ContractorConfig &cont
void ContractorOptions::GenerateOutputFilesNames(ContractorConfig &contractor_config) void ContractorOptions::GenerateOutputFilesNames(ContractorConfig &contractor_config)
{ {
contractor_config.node_output_path = contractor_config.osrm_input_path.string() + ".nodes"; contractor_config.node_output_path = contractor_config.osrm_input_path.string() + ".nodes";
contractor_config.core_output_path = contractor_config.osrm_input_path.string() + ".core";
contractor_config.edge_output_path = contractor_config.osrm_input_path.string() + ".edges"; contractor_config.edge_output_path = contractor_config.osrm_input_path.string() + ".edges";
contractor_config.geometry_output_path = contractor_config.osrm_input_path.string() + ".geometry"; contractor_config.geometry_output_path = contractor_config.osrm_input_path.string() + ".geometry";
contractor_config.graph_output_path = contractor_config.osrm_input_path.string() + ".hsgr"; contractor_config.graph_output_path = contractor_config.osrm_input_path.string() + ".hsgr";

View File

@ -49,6 +49,7 @@ struct ContractorConfig
boost::filesystem::path profile_path; boost::filesystem::path profile_path;
std::string node_output_path; std::string node_output_path;
std::string core_output_path;
std::string edge_output_path; std::string edge_output_path;
std::string geometry_output_path; std::string geometry_output_path;
std::string graph_output_path; std::string graph_output_path;
@ -56,6 +57,12 @@ struct ContractorConfig
std::string rtree_leafs_output_path; std::string rtree_leafs_output_path;
unsigned requested_num_threads; unsigned requested_num_threads;
//A percentage of vertices that will be contracted for the hierarchy.
//Offers a trade-off between preprocessing and query time.
//The remaining vertices form the core of the hierarchy
//(e.g. 0.8 contracts 80 percent of the hierarchy, leaving a core of 20%)
double core_factor;
}; };
struct ContractorOptions struct ContractorOptions

View File

@ -26,7 +26,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#include "edge_based_graph_factory.hpp" #include "edge_based_graph_factory.hpp"
#include "../algorithms/tiny_components.hpp"
#include "../data_structures/percent.hpp" #include "../data_structures/percent.hpp"
#include "../util/compute_angle.hpp" #include "../util/compute_angle.hpp"
#include "../util/integer_range.hpp" #include "../util/integer_range.hpp"
@ -40,21 +39,19 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <iomanip> #include <iomanip>
#include <limits> #include <limits>
EdgeBasedGraphFactory::EdgeBasedGraphFactory(std::shared_ptr<NodeBasedDynamicGraph> node_based_graph, EdgeBasedGraphFactory::EdgeBasedGraphFactory(
std::shared_ptr<RestrictionMap> restriction_map, std::shared_ptr<NodeBasedDynamicGraph> node_based_graph,
std::unique_ptr<std::vector<NodeID>> barrier_node_list, const CompressedEdgeContainer &compressed_edge_container,
std::unique_ptr<std::vector<NodeID>> traffic_light_node_list, const std::unordered_set<NodeID> &barrier_nodes,
const std::unordered_set<NodeID> &traffic_lights,
std::shared_ptr<const RestrictionMap> restriction_map,
const std::vector<QueryNode> &node_info_list, const std::vector<QueryNode> &node_info_list,
const SpeedProfileProperties &speed_profile) SpeedProfileProperties speed_profile)
: speed_profile(speed_profile), : m_max_edge_id(0), m_node_info_list(node_info_list), m_node_based_graph(std::move(node_based_graph)),
m_number_of_edge_based_nodes(std::numeric_limits<unsigned>::max()), m_restriction_map(std::move(restriction_map)), m_barrier_nodes(barrier_nodes),
m_node_info_list(node_info_list), m_traffic_lights(traffic_lights), m_compressed_edge_container(compressed_edge_container),
m_node_based_graph(std::move(node_based_graph)), speed_profile(std::move(speed_profile))
m_restriction_map(std::move(restriction_map)), max_id(0), removed_node_count(0)
{ {
// insert into unordered sets for fast lookup
m_barrier_nodes.insert(barrier_node_list->begin(), barrier_node_list->end());
m_traffic_lights.insert(traffic_light_node_list->begin(), traffic_light_node_list->end());
} }
void EdgeBasedGraphFactory::GetEdgeBasedEdges(DeallocatingVector<EdgeBasedEdge> &output_edge_list) void EdgeBasedGraphFactory::GetEdgeBasedEdges(DeallocatingVector<EdgeBasedEdge> &output_edge_list)
@ -77,9 +74,13 @@ void EdgeBasedGraphFactory::GetEdgeBasedNodes(std::vector<EdgeBasedNode> &nodes)
nodes.swap(m_edge_based_node_list); nodes.swap(m_edge_based_node_list);
} }
unsigned EdgeBasedGraphFactory::GetHighestEdgeID()
{
return m_max_edge_id;
}
void EdgeBasedGraphFactory::InsertEdgeBasedNode(const NodeID node_u, void EdgeBasedGraphFactory::InsertEdgeBasedNode(const NodeID node_u,
const NodeID node_v, const NodeID node_v)
const unsigned component_id)
{ {
// merge edges together into one EdgeBasedNode // merge edges together into one EdgeBasedNode
BOOST_ASSERT(node_u != SPECIAL_NODEID); BOOST_ASSERT(node_u != SPECIAL_NODEID);
@ -97,23 +98,21 @@ void EdgeBasedGraphFactory::InsertEdgeBasedNode(const NodeID node_u,
const EdgeData &reverse_data = m_node_based_graph->GetEdgeData(edge_id_2); const EdgeData &reverse_data = m_node_based_graph->GetEdgeData(edge_id_2);
if (forward_data.edgeBasedNodeID == SPECIAL_NODEID && if (forward_data.edge_id == SPECIAL_NODEID &&
reverse_data.edgeBasedNodeID == SPECIAL_NODEID) reverse_data.edge_id == SPECIAL_NODEID)
{ {
return; return;
} }
BOOST_ASSERT(m_geometry_compressor.HasEntryForID(edge_id_1) == BOOST_ASSERT(m_compressed_edge_container.HasEntryForID(edge_id_1) ==
m_geometry_compressor.HasEntryForID(edge_id_2)); m_compressed_edge_container.HasEntryForID(edge_id_2));
if (m_geometry_compressor.HasEntryForID(edge_id_1)) if (m_compressed_edge_container.HasEntryForID(edge_id_1))
{ {
BOOST_ASSERT(m_geometry_compressor.HasEntryForID(edge_id_2)); BOOST_ASSERT(m_compressed_edge_container.HasEntryForID(edge_id_2));
// reconstruct geometry and put in each individual edge with its offset // reconstruct geometry and put in each individual edge with its offset
const std::vector<GeometryCompressor::CompressedNode> &forward_geometry = const auto& forward_geometry = m_compressed_edge_container.GetBucketReference(edge_id_1);
m_geometry_compressor.GetBucketReference(edge_id_1); const auto& reverse_geometry = m_compressed_edge_container.GetBucketReference(edge_id_2);
const std::vector<GeometryCompressor::CompressedNode> &reverse_geometry =
m_geometry_compressor.GetBucketReference(edge_id_2);
BOOST_ASSERT(forward_geometry.size() == reverse_geometry.size()); BOOST_ASSERT(forward_geometry.size() == reverse_geometry.size());
BOOST_ASSERT(0 != forward_geometry.size()); BOOST_ASSERT(0 != forward_geometry.size());
const unsigned geometry_size = static_cast<unsigned>(forward_geometry.size()); const unsigned geometry_size = static_cast<unsigned>(forward_geometry.size());
@ -146,15 +145,6 @@ void EdgeBasedGraphFactory::InsertEdgeBasedNode(const NodeID node_u,
NodeID current_edge_source_coordinate_id = node_u; NodeID current_edge_source_coordinate_id = node_u;
if (SPECIAL_NODEID != forward_data.edgeBasedNodeID)
{
max_id = std::max(forward_data.edgeBasedNodeID, max_id);
}
if (SPECIAL_NODEID != reverse_data.edgeBasedNodeID)
{
max_id = std::max(reverse_data.edgeBasedNodeID, max_id);
}
// traverse arrays from start and end respectively // traverse arrays from start and end respectively
for (const auto i : osrm::irange(0u, geometry_size)) for (const auto i : osrm::irange(0u, geometry_size))
{ {
@ -165,12 +155,12 @@ void EdgeBasedGraphFactory::InsertEdgeBasedNode(const NodeID node_u,
// build edges // build edges
m_edge_based_node_list.emplace_back( m_edge_based_node_list.emplace_back(
forward_data.edgeBasedNodeID, reverse_data.edgeBasedNodeID, forward_data.edge_id, reverse_data.edge_id,
current_edge_source_coordinate_id, current_edge_target_coordinate_id, current_edge_source_coordinate_id, current_edge_target_coordinate_id,
forward_data.nameID, forward_geometry[i].second, forward_data.name_id, forward_geometry[i].second,
reverse_geometry[geometry_size - 1 - i].second, forward_dist_prefix_sum[i], reverse_geometry[geometry_size - 1 - i].second, forward_dist_prefix_sum[i],
reverse_dist_prefix_sum[i], m_geometry_compressor.GetPositionForID(edge_id_1), reverse_dist_prefix_sum[i], m_compressed_edge_container.GetPositionForID(edge_id_1),
component_id, i, forward_data.travel_mode, reverse_data.travel_mode); INVALID_COMPONENTID, i, forward_data.travel_mode, reverse_data.travel_mode);
current_edge_source_coordinate_id = current_edge_target_coordinate_id; current_edge_source_coordinate_id = current_edge_target_coordinate_id;
BOOST_ASSERT(m_edge_based_node_list.back().IsCompressed()); BOOST_ASSERT(m_edge_based_node_list.back().IsCompressed());
@ -187,33 +177,33 @@ void EdgeBasedGraphFactory::InsertEdgeBasedNode(const NodeID node_u,
} }
else else
{ {
BOOST_ASSERT(!m_geometry_compressor.HasEntryForID(edge_id_2)); BOOST_ASSERT(!m_compressed_edge_container.HasEntryForID(edge_id_2));
if (forward_data.edgeBasedNodeID != SPECIAL_NODEID) if (forward_data.edge_id != SPECIAL_NODEID)
{ {
BOOST_ASSERT(forward_data.forward); BOOST_ASSERT(!forward_data.reversed);
} }
else else
{ {
BOOST_ASSERT(!forward_data.forward); BOOST_ASSERT(forward_data.reversed);
} }
if (reverse_data.edgeBasedNodeID != SPECIAL_NODEID) if (reverse_data.edge_id != SPECIAL_NODEID)
{ {
BOOST_ASSERT(reverse_data.forward); BOOST_ASSERT(!reverse_data.reversed);
} }
else else
{ {
BOOST_ASSERT(!reverse_data.forward); BOOST_ASSERT(reverse_data.reversed);
} }
BOOST_ASSERT(forward_data.edgeBasedNodeID != SPECIAL_NODEID || BOOST_ASSERT(forward_data.edge_id != SPECIAL_NODEID ||
reverse_data.edgeBasedNodeID != SPECIAL_NODEID); reverse_data.edge_id != SPECIAL_NODEID);
m_edge_based_node_list.emplace_back( m_edge_based_node_list.emplace_back(
forward_data.edgeBasedNodeID, reverse_data.edgeBasedNodeID, node_u, node_v, forward_data.edge_id, reverse_data.edge_id, node_u, node_v,
forward_data.nameID, forward_data.distance, reverse_data.distance, 0, 0, SPECIAL_EDGEID, forward_data.name_id, forward_data.distance, reverse_data.distance, 0, 0, SPECIAL_EDGEID,
component_id, 0, forward_data.travel_mode, reverse_data.travel_mode); INVALID_COMPONENTID, 0, forward_data.travel_mode, reverse_data.travel_mode);
BOOST_ASSERT(!m_edge_based_node_list.back().IsCompressed()); BOOST_ASSERT(!m_edge_based_node_list.back().IsCompressed());
} }
} }
@ -231,15 +221,10 @@ void EdgeBasedGraphFactory::FlushVectorToStream(
} }
void EdgeBasedGraphFactory::Run(const std::string &original_edge_data_filename, void EdgeBasedGraphFactory::Run(const std::string &original_edge_data_filename,
const std::string &geometry_filename,
lua_State *lua_state) lua_State *lua_state)
{ {
TIMER_START(geometry);
CompressGeometry();
TIMER_STOP(geometry);
TIMER_START(renumber); TIMER_START(renumber);
RenumberEdges(); m_max_edge_id = RenumberEdges() - 1;
TIMER_STOP(renumber); TIMER_STOP(renumber);
TIMER_START(generate_nodes); TIMER_START(generate_nodes);
@ -250,193 +235,17 @@ void EdgeBasedGraphFactory::Run(const std::string &original_edge_data_filename,
GenerateEdgeExpandedEdges(original_edge_data_filename, lua_state); GenerateEdgeExpandedEdges(original_edge_data_filename, lua_state);
TIMER_STOP(generate_edges); TIMER_STOP(generate_edges);
m_geometry_compressor.SerializeInternalVector(geometry_filename);
SimpleLogger().Write() << "Timing statistics for edge-expanded graph:"; SimpleLogger().Write() << "Timing statistics for edge-expanded graph:";
SimpleLogger().Write() << "Geometry compression: " << TIMER_SEC(geometry) << "s";
SimpleLogger().Write() << "Renumbering edges: " << TIMER_SEC(renumber) << "s"; SimpleLogger().Write() << "Renumbering edges: " << TIMER_SEC(renumber) << "s";
SimpleLogger().Write() << "Generating nodes: " << TIMER_SEC(generate_nodes) << "s"; SimpleLogger().Write() << "Generating nodes: " << TIMER_SEC(generate_nodes) << "s";
SimpleLogger().Write() << "Generating edges: " << TIMER_SEC(generate_edges) << "s"; SimpleLogger().Write() << "Generating edges: " << TIMER_SEC(generate_edges) << "s";
} }
void EdgeBasedGraphFactory::CompressGeometry()
{
SimpleLogger().Write() << "Removing graph geometry while preserving topology";
const unsigned original_number_of_nodes = m_node_based_graph->GetNumberOfNodes(); /// Renumbers all _forward_ edges and sets the edge_id.
const unsigned original_number_of_edges = m_node_based_graph->GetNumberOfEdges();
Percent progress(original_number_of_nodes);
for (const NodeID node_v : osrm::irange(0u, original_number_of_nodes))
{
progress.printStatus(node_v);
// only contract degree 2 vertices
if (2 != m_node_based_graph->GetOutDegree(node_v))
{
continue;
}
// don't contract barrier node
if (m_barrier_nodes.end() != m_barrier_nodes.find(node_v))
{
continue;
}
// check if v is a via node for a turn restriction, i.e. a 'directed' barrier node
if (m_restriction_map->IsViaNode(node_v))
{
continue;
}
/*
* reverse_e2 forward_e2
* u <---------- v -----------> w
* ----------> <-----------
* forward_e1 reverse_e1
*
* Will be compressed to:
*
* reverse_e1
* u <---------- w
* ---------->
* forward_e1
*
* If the edges are compatible.
*
*/
const bool reverse_edge_order =
!(m_node_based_graph->GetEdgeData(m_node_based_graph->BeginEdges(node_v)).forward);
const EdgeID forward_e2 = m_node_based_graph->BeginEdges(node_v) + reverse_edge_order;
BOOST_ASSERT(SPECIAL_EDGEID != forward_e2);
BOOST_ASSERT(forward_e2 >= m_node_based_graph->BeginEdges(node_v) &&
forward_e2 < m_node_based_graph->EndEdges(node_v));
const EdgeID reverse_e2 = m_node_based_graph->BeginEdges(node_v) + 1 - reverse_edge_order;
BOOST_ASSERT(SPECIAL_EDGEID != reverse_e2);
BOOST_ASSERT(reverse_e2 >= m_node_based_graph->BeginEdges(node_v) &&
reverse_e2 < m_node_based_graph->EndEdges(node_v));
const EdgeData &fwd_edge_data2 = m_node_based_graph->GetEdgeData(forward_e2);
const EdgeData &rev_edge_data2 = m_node_based_graph->GetEdgeData(reverse_e2);
const NodeID node_w = m_node_based_graph->GetTarget(forward_e2);
BOOST_ASSERT(SPECIAL_NODEID != node_w);
BOOST_ASSERT(node_v != node_w);
const NodeID node_u = m_node_based_graph->GetTarget(reverse_e2);
BOOST_ASSERT(SPECIAL_NODEID != node_u);
BOOST_ASSERT(node_u != node_v);
const EdgeID forward_e1 = m_node_based_graph->FindEdge(node_u, node_v);
BOOST_ASSERT(SPECIAL_EDGEID != forward_e1);
BOOST_ASSERT(node_v == m_node_based_graph->GetTarget(forward_e1));
const EdgeID reverse_e1 = m_node_based_graph->FindEdge(node_w, node_v);
BOOST_ASSERT(SPECIAL_EDGEID != reverse_e1);
BOOST_ASSERT(node_v == m_node_based_graph->GetTarget(reverse_e1));
const EdgeData &fwd_edge_data1 = m_node_based_graph->GetEdgeData(forward_e1);
const EdgeData &rev_edge_data1 = m_node_based_graph->GetEdgeData(reverse_e1);
if (m_node_based_graph->FindEdgeInEitherDirection(node_u, node_w) != SPECIAL_EDGEID)
{
continue;
}
// this case can happen if two ways with different names overlap
if (fwd_edge_data1.nameID != rev_edge_data1.nameID ||
fwd_edge_data2.nameID != rev_edge_data2.nameID)
{
continue;
}
if (fwd_edge_data1.IsCompatibleTo(fwd_edge_data2) && rev_edge_data1.IsCompatibleTo(rev_edge_data2))
{
BOOST_ASSERT(m_node_based_graph->GetEdgeData(forward_e1).nameID ==
m_node_based_graph->GetEdgeData(reverse_e1).nameID);
BOOST_ASSERT(m_node_based_graph->GetEdgeData(forward_e2).nameID ==
m_node_based_graph->GetEdgeData(reverse_e2).nameID);
// Get distances before graph is modified
const int forward_weight1 = m_node_based_graph->GetEdgeData(forward_e1).distance;
const int forward_weight2 = m_node_based_graph->GetEdgeData(forward_e2).distance;
BOOST_ASSERT(0 != forward_weight1);
BOOST_ASSERT(0 != forward_weight2);
const int reverse_weight1 = m_node_based_graph->GetEdgeData(reverse_e1).distance;
const int reverse_weight2 = m_node_based_graph->GetEdgeData(reverse_e2).distance;
BOOST_ASSERT(0 != reverse_weight1);
BOOST_ASSERT(0 != reverse_weight2);
const bool has_node_penalty = m_traffic_lights.find(node_v) != m_traffic_lights.end();
// add weight of e2's to e1
m_node_based_graph->GetEdgeData(forward_e1).distance += fwd_edge_data2.distance;
m_node_based_graph->GetEdgeData(reverse_e1).distance += rev_edge_data2.distance;
if (has_node_penalty)
{
m_node_based_graph->GetEdgeData(forward_e1).distance +=
speed_profile.traffic_signal_penalty;
m_node_based_graph->GetEdgeData(reverse_e1).distance +=
speed_profile.traffic_signal_penalty;
}
// extend e1's to targets of e2's
m_node_based_graph->SetTarget(forward_e1, node_w);
m_node_based_graph->SetTarget(reverse_e1, node_u);
// remove e2's (if bidir, otherwise only one)
m_node_based_graph->DeleteEdge(node_v, forward_e2);
m_node_based_graph->DeleteEdge(node_v, reverse_e2);
// update any involved turn restrictions
m_restriction_map->FixupStartingTurnRestriction(node_u, node_v, node_w);
m_restriction_map->FixupArrivingTurnRestriction(node_u, node_v, node_w,
*m_node_based_graph);
m_restriction_map->FixupStartingTurnRestriction(node_w, node_v, node_u);
m_restriction_map->FixupArrivingTurnRestriction(node_w, node_v, node_u,
*m_node_based_graph);
// store compressed geometry in container
m_geometry_compressor.CompressEdge(
forward_e1, forward_e2, node_v, node_w,
forward_weight1 + (has_node_penalty ? speed_profile.traffic_signal_penalty : 0),
forward_weight2);
m_geometry_compressor.CompressEdge(
reverse_e1, reverse_e2, node_v, node_u, reverse_weight1,
reverse_weight2 + (has_node_penalty ? speed_profile.traffic_signal_penalty : 0));
++removed_node_count;
}
}
SimpleLogger().Write() << "removed " << removed_node_count << " nodes";
m_geometry_compressor.PrintStatistics();
unsigned new_node_count = 0;
unsigned new_edge_count = 0;
for (const auto i : osrm::irange(0u, m_node_based_graph->GetNumberOfNodes()))
{
if (m_node_based_graph->GetOutDegree(i) > 0)
{
++new_node_count;
new_edge_count += (m_node_based_graph->EndEdges(i) - m_node_based_graph->BeginEdges(i));
}
}
SimpleLogger().Write() << "new nodes: " << new_node_count << ", edges " << new_edge_count;
SimpleLogger().Write() << "Node compression ratio: "
<< new_node_count / (double)original_number_of_nodes;
SimpleLogger().Write() << "Edge compression ratio: "
<< new_edge_count / (double)original_number_of_edges;
}
/// Renumbers all _forward_ edges and sets the edgeBasedNodeID.
/// A specific numbering is not important. Any unique ID will do. /// A specific numbering is not important. Any unique ID will do.
void EdgeBasedGraphFactory::RenumberEdges() /// Returns the number of edge based nodes.
unsigned EdgeBasedGraphFactory::RenumberEdges()
{ {
// renumber edge based node of outgoing edges // renumber edge based node of outgoing edges
unsigned numbered_edges_count = 0; unsigned numbered_edges_count = 0;
@ -446,43 +255,26 @@ void EdgeBasedGraphFactory::RenumberEdges()
{ {
EdgeData &edge_data = m_node_based_graph->GetEdgeData(current_edge); EdgeData &edge_data = m_node_based_graph->GetEdgeData(current_edge);
// this edge is an incoming edge // only number incoming edges
if (!edge_data.forward) if (edge_data.reversed)
{ {
continue; continue;
} }
BOOST_ASSERT(numbered_edges_count < m_node_based_graph->GetNumberOfEdges()); BOOST_ASSERT(numbered_edges_count < m_node_based_graph->GetNumberOfEdges());
edge_data.edgeBasedNodeID = numbered_edges_count; edge_data.edge_id = numbered_edges_count;
++numbered_edges_count; ++numbered_edges_count;
BOOST_ASSERT(SPECIAL_NODEID != edge_data.edgeBasedNodeID); BOOST_ASSERT(SPECIAL_NODEID != edge_data.edge_id);
} }
} }
m_number_of_edge_based_nodes = numbered_edges_count;
return numbered_edges_count;
} }
/** /// Creates the nodes in the edge expanded graph from edges in the node-based graph.
* Creates the nodes in the edge expanded graph from edges in the node-based graph.
*/
void EdgeBasedGraphFactory::GenerateEdgeExpandedNodes() void EdgeBasedGraphFactory::GenerateEdgeExpandedNodes()
{ {
SimpleLogger().Write() << "Identifying components of the (compressed) road network";
// Run a BFS on the undirected graph and identify small components
TarjanSCC<NodeBasedDynamicGraph> component_explorer(m_node_based_graph, *m_restriction_map,
m_barrier_nodes);
component_explorer.run();
SimpleLogger().Write() << "identified: "
<< component_explorer.get_number_of_components() - removed_node_count
<< " (compressed) components";
SimpleLogger().Write() << "identified "
<< component_explorer.get_size_one_count() - removed_node_count
<< " (compressed) SCCs of size 1";
SimpleLogger().Write() << "generating edge-expanded nodes";
Percent progress(m_node_based_graph->GetNumberOfNodes()); Percent progress(m_node_based_graph->GetNumberOfNodes());
// loop over all edges and generate new set of nodes // loop over all edges and generate new set of nodes
@ -507,34 +299,14 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedNodes()
BOOST_ASSERT(node_u < node_v); BOOST_ASSERT(node_u < node_v);
// Note: edges that end on barrier nodes or on a turn restriction // if we found a non-forward edge reverse and try again
// may actually be in two distinct components. We choose the smallest if (edge_data.edge_id == SPECIAL_NODEID)
const unsigned size_of_component =
std::min(component_explorer.get_component_size(node_u),
component_explorer.get_component_size(node_v));
const unsigned id_of_smaller_component = [node_u, node_v, &component_explorer]
{ {
if (component_explorer.get_component_size(node_u) < InsertEdgeBasedNode(node_v, node_u);
component_explorer.get_component_size(node_v))
{
return component_explorer.get_component_id(node_u);
}
return component_explorer.get_component_id(node_v);
}();
const bool component_is_tiny = size_of_component < 1000;
// we only set edgeBasedNodeID for forward edges
if (edge_data.edgeBasedNodeID == SPECIAL_NODEID)
{
InsertEdgeBasedNode(node_v, node_u,
(component_is_tiny ? id_of_smaller_component + 1 : 0));
} }
else else
{ {
InsertEdgeBasedNode(node_u, node_v, InsertEdgeBasedNode(node_u, node_v);
(component_is_tiny ? id_of_smaller_component + 1 : 0));
} }
} }
} }
@ -543,9 +315,7 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedNodes()
<< " nodes in edge-expanded graph"; << " nodes in edge-expanded graph";
} }
/** /// Actually it also generates OriginalEdgeData and serializes them...
* Actually it also generates OriginalEdgeData and serializes them...
*/
void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges( void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges(
const std::string &original_edge_data_filename, lua_State *lua_state) const std::string &original_edge_data_filename, lua_State *lua_state)
{ {
@ -577,7 +347,7 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges(
progress.printStatus(node_u); progress.printStatus(node_u);
for (const EdgeID e1 : m_node_based_graph->GetAdjacentEdgeRange(node_u)) for (const EdgeID e1 : m_node_based_graph->GetAdjacentEdgeRange(node_u))
{ {
if (!m_node_based_graph->GetEdgeData(e1).forward) if (m_node_based_graph->GetEdgeData(e1).reversed)
{ {
continue; continue;
} }
@ -590,7 +360,7 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges(
for (const EdgeID e2 : m_node_based_graph->GetAdjacentEdgeRange(node_v)) for (const EdgeID e2 : m_node_based_graph->GetAdjacentEdgeRange(node_v))
{ {
if (!m_node_based_graph->GetEdgeData(e2).forward) if (m_node_based_graph->GetEdgeData(e2).reversed)
{ {
continue; continue;
} }
@ -636,9 +406,9 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges(
const EdgeData &edge_data1 = m_node_based_graph->GetEdgeData(e1); const EdgeData &edge_data1 = m_node_based_graph->GetEdgeData(e1);
const EdgeData &edge_data2 = m_node_based_graph->GetEdgeData(e2); const EdgeData &edge_data2 = m_node_based_graph->GetEdgeData(e2);
BOOST_ASSERT(edge_data1.edgeBasedNodeID != edge_data2.edgeBasedNodeID); BOOST_ASSERT(edge_data1.edge_id != edge_data2.edge_id);
BOOST_ASSERT(edge_data1.forward); BOOST_ASSERT(!edge_data1.reversed);
BOOST_ASSERT(edge_data2.forward); BOOST_ASSERT(!edge_data2.reversed);
// the following is the core of the loop. // the following is the core of the loop.
unsigned distance = edge_data1.distance; unsigned distance = edge_data1.distance;
@ -649,14 +419,14 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges(
// unpack last node of first segment if packed // unpack last node of first segment if packed
const auto first_coordinate = const auto first_coordinate =
m_node_info_list[(m_geometry_compressor.HasEntryForID(e1) m_node_info_list[(m_compressed_edge_container.HasEntryForID(e1)
? m_geometry_compressor.GetLastNodeIDOfBucket(e1) ? m_compressed_edge_container.GetLastEdgeSourceID(e1)
: node_u)]; : node_u)];
// unpack first node of second segment if packed // unpack first node of second segment if packed
const auto third_coordinate = const auto third_coordinate =
m_node_info_list[(m_geometry_compressor.HasEntryForID(e2) m_node_info_list[(m_compressed_edge_container.HasEntryForID(e2)
? m_geometry_compressor.GetFirstNodeIDOfBucket(e2) ? m_compressed_edge_container.GetFirstEdgeTargetID(e2)
: node_w)]; : node_w)];
const double turn_angle = ComputeAngle::OfThreeFixedPointCoordinates( const double turn_angle = ComputeAngle::OfThreeFixedPointCoordinates(
@ -670,7 +440,7 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges(
} }
distance += turn_penalty; distance += turn_penalty;
const bool edge_is_compressed = m_geometry_compressor.HasEntryForID(e1); const bool edge_is_compressed = m_compressed_edge_container.HasEntryForID(e1);
if (edge_is_compressed) if (edge_is_compressed)
{ {
@ -678,8 +448,8 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges(
} }
original_edge_data_vector.emplace_back( original_edge_data_vector.emplace_back(
(edge_is_compressed ? m_geometry_compressor.GetPositionForID(e1) : node_v), (edge_is_compressed ? m_compressed_edge_container.GetPositionForID(e1) : node_v),
edge_data1.nameID, turn_instruction, edge_is_compressed, edge_data1.name_id, turn_instruction, edge_is_compressed,
edge_data2.travel_mode); edge_data2.travel_mode);
++original_edges_counter; ++original_edges_counter;
@ -689,12 +459,11 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges(
FlushVectorToStream(edge_data_file, original_edge_data_vector); FlushVectorToStream(edge_data_file, original_edge_data_vector);
} }
BOOST_ASSERT(SPECIAL_NODEID != edge_data1.edgeBasedNodeID); BOOST_ASSERT(SPECIAL_NODEID != edge_data1.edge_id);
BOOST_ASSERT(SPECIAL_NODEID != edge_data2.edgeBasedNodeID); BOOST_ASSERT(SPECIAL_NODEID != edge_data2.edge_id);
m_edge_based_edge_list.emplace_back( m_edge_based_edge_list.emplace_back(edge_data1.edge_id, edge_data2.edge_id,
EdgeBasedEdge(edge_data1.edgeBasedNodeID, edge_data2.edgeBasedNodeID, m_edge_based_edge_list.size(), distance, true, false);
m_edge_based_edge_list.size(), distance, true, false));
} }
} }
} }
@ -723,7 +492,8 @@ int EdgeBasedGraphFactory::GetTurnPenalty(double angle, lua_State *lua_state) co
try try
{ {
// call lua profile to compute turn penalty // call lua profile to compute turn penalty
return luabind::call_function<int>(lua_state, "turn_function", 180. - angle); double penalty = luabind::call_function<double>(lua_state, "turn_function", 180. - angle);
return static_cast<int>(penalty);
} }
catch (const luabind::error &er) catch (const luabind::error &er)
{ {
@ -777,11 +547,11 @@ TurnInstruction EdgeBasedGraphFactory::AnalyzeTurn(const NodeID node_u,
// If street names stay the same and if we are certain that it is not a // If street names stay the same and if we are certain that it is not a
// a segment of a roundabout, we skip it. // a segment of a roundabout, we skip it.
if (data1.nameID == data2.nameID) if (data1.name_id == data2.name_id && data1.travel_mode == data2.travel_mode)
{ {
// TODO: Here we should also do a small graph exploration to check for // TODO: Here we should also do a small graph exploration to check for
// more complex situations // more complex situations
if (0 != data1.nameID || m_node_based_graph->GetOutDegree(node_v) <= 2) if (0 != data1.name_id || m_node_based_graph->GetOutDegree(node_v) <= 2)
{ {
return TurnInstruction::NoTurn; return TurnInstruction::NoTurn;
} }
@ -790,7 +560,3 @@ TurnInstruction EdgeBasedGraphFactory::AnalyzeTurn(const NodeID node_u,
return TurnInstructionsClass::GetTurnDirectionOfInstruction(angle); return TurnInstructionsClass::GetTurnDirectionOfInstruction(angle);
} }
unsigned EdgeBasedGraphFactory::GetNumberOfEdgeBasedNodes() const
{
return m_number_of_edge_based_nodes;
}

View File

@ -30,8 +30,9 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef EDGE_BASED_GRAPH_FACTORY_HPP_ #ifndef EDGE_BASED_GRAPH_FACTORY_HPP_
#define EDGE_BASED_GRAPH_FACTORY_HPP_ #define EDGE_BASED_GRAPH_FACTORY_HPP_
#include "geometry_compressor.hpp" #include "speed_profile.hpp"
#include "../typedefs.h" #include "../typedefs.h"
#include "../data_structures/compressed_edge_container.hpp"
#include "../data_structures/deallocating_vector.hpp" #include "../data_structures/deallocating_vector.hpp"
#include "../data_structures/edge_based_node.hpp" #include "../data_structures/edge_based_node.hpp"
#include "../data_structures/original_edge_data.hpp" #include "../data_structures/original_edge_data.hpp"
@ -57,73 +58,55 @@ class EdgeBasedGraphFactory
EdgeBasedGraphFactory() = delete; EdgeBasedGraphFactory() = delete;
EdgeBasedGraphFactory(const EdgeBasedGraphFactory &) = delete; EdgeBasedGraphFactory(const EdgeBasedGraphFactory &) = delete;
struct SpeedProfileProperties;
explicit EdgeBasedGraphFactory(std::shared_ptr<NodeBasedDynamicGraph> node_based_graph, explicit EdgeBasedGraphFactory(std::shared_ptr<NodeBasedDynamicGraph> node_based_graph,
std::shared_ptr<RestrictionMap> restricion_map, const CompressedEdgeContainer &compressed_edge_container,
std::unique_ptr<std::vector<NodeID>> barrier_node_list, const std::unordered_set<NodeID> &barrier_nodes,
std::unique_ptr<std::vector<NodeID>> traffic_light_node_list, const std::unordered_set<NodeID> &traffic_lights,
std::shared_ptr<const RestrictionMap> restriction_map,
const std::vector<QueryNode> &node_info_list, const std::vector<QueryNode> &node_info_list,
const SpeedProfileProperties &speed_profile); SpeedProfileProperties speed_profile);
void Run(const std::string &original_edge_data_filename, void Run(const std::string &original_edge_data_filename,
const std::string &geometry_filename,
lua_State *lua_state); lua_State *lua_state);
void GetEdgeBasedEdges(DeallocatingVector<EdgeBasedEdge> &edges); void GetEdgeBasedEdges(DeallocatingVector<EdgeBasedEdge> &edges);
void GetEdgeBasedNodes(std::vector<EdgeBasedNode> &nodes); void GetEdgeBasedNodes(std::vector<EdgeBasedNode> &nodes);
unsigned GetHighestEdgeID();
TurnInstruction AnalyzeTurn(const NodeID u, const NodeID v, const NodeID w, const double angle) const; TurnInstruction AnalyzeTurn(const NodeID u, const NodeID v, const NodeID w, const double angle) const;
int GetTurnPenalty(double angle, lua_State *lua_state) const; int GetTurnPenalty(double angle, lua_State *lua_state) const;
unsigned GetNumberOfEdgeBasedNodes() const;
struct SpeedProfileProperties
{
SpeedProfileProperties()
: traffic_signal_penalty(0), u_turn_penalty(0), has_turn_penalty_function(false)
{
}
int traffic_signal_penalty;
int u_turn_penalty;
bool has_turn_penalty_function;
} speed_profile;
private: private:
using EdgeData = NodeBasedDynamicGraph::EdgeData; using EdgeData = NodeBasedDynamicGraph::EdgeData;
unsigned m_number_of_edge_based_nodes;
std::vector<EdgeBasedNode> m_edge_based_node_list; std::vector<EdgeBasedNode> m_edge_based_node_list;
DeallocatingVector<EdgeBasedEdge> m_edge_based_edge_list; DeallocatingVector<EdgeBasedEdge> m_edge_based_edge_list;
unsigned m_max_edge_id;
const std::vector<QueryNode>& m_node_info_list; const std::vector<QueryNode>& m_node_info_list;
std::shared_ptr<NodeBasedDynamicGraph> m_node_based_graph; std::shared_ptr<NodeBasedDynamicGraph> m_node_based_graph;
std::shared_ptr<RestrictionMap> m_restriction_map; std::shared_ptr<RestrictionMap const> m_restriction_map;
std::unordered_set<NodeID> m_barrier_nodes; const std::unordered_set<NodeID>& m_barrier_nodes;
std::unordered_set<NodeID> m_traffic_lights; const std::unordered_set<NodeID>& m_traffic_lights;
const CompressedEdgeContainer& m_compressed_edge_container;
SpeedProfileProperties speed_profile;
GeometryCompressor m_geometry_compressor;
void CompressGeometry(); void CompressGeometry();
void RenumberEdges(); unsigned RenumberEdges();
void GenerateEdgeExpandedNodes(); void GenerateEdgeExpandedNodes();
void GenerateEdgeExpandedEdges(const std::string &original_edge_data_filename, void GenerateEdgeExpandedEdges(const std::string &original_edge_data_filename,
lua_State *lua_state); lua_State *lua_state);
void InsertEdgeBasedNode(const NodeID u, const NodeID v, const unsigned component_id); void InsertEdgeBasedNode(const NodeID u, const NodeID v);
void FlushVectorToStream(std::ofstream &edge_data_file, void FlushVectorToStream(std::ofstream &edge_data_file,
std::vector<OriginalEdgeData> &original_edge_data_vector) const; std::vector<OriginalEdgeData> &original_edge_data_vector) const;
NodeID max_id;
std::size_t removed_node_count;
}; };
#endif /* EDGE_BASED_GRAPH_FACTORY_HPP_ */ #endif /* EDGE_BASED_GRAPH_FACTORY_HPP_ */

View File

@ -28,8 +28,10 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "processing_chain.hpp" #include "processing_chain.hpp"
#include "contractor.hpp" #include "contractor.hpp"
#include "../algorithms/graph_compressor.hpp"
#include "../algorithms/tarjan_scc.hpp"
#include "../algorithms/crc32_processor.hpp" #include "../algorithms/crc32_processor.hpp"
#include "../data_structures/compressed_edge_container.hpp"
#include "../data_structures/deallocating_vector.hpp" #include "../data_structures/deallocating_vector.hpp"
#include "../data_structures/static_rtree.hpp" #include "../data_structures/static_rtree.hpp"
#include "../data_structures/restriction_map.hpp" #include "../data_structures/restriction_map.hpp"
@ -38,7 +40,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../util/graph_loader.hpp" #include "../util/graph_loader.hpp"
#include "../util/integer_range.hpp" #include "../util/integer_range.hpp"
#include "../util/lua_util.hpp" #include "../util/lua_util.hpp"
#include "../util/make_unique.hpp"
#include "../util/osrm_exception.hpp" #include "../util/osrm_exception.hpp"
#include "../util/simple_logger.hpp" #include "../util/simple_logger.hpp"
#include "../util/string_util.hpp" #include "../util/string_util.hpp"
@ -77,50 +78,51 @@ int Prepare::Run()
TIMER_START(expansion); TIMER_START(expansion);
auto node_based_edge_list = osrm::make_unique<std::vector<EdgeBasedNode>>();; std::vector<EdgeBasedNode> node_based_edge_list;
DeallocatingVector<EdgeBasedEdge> edge_based_edge_list; DeallocatingVector<EdgeBasedEdge> edge_based_edge_list;
auto internal_to_external_node_map = osrm::make_unique<std::vector<QueryNode>>(); std::vector<QueryNode> internal_to_external_node_map;
auto graph_size = auto graph_size = BuildEdgeExpandedGraph(internal_to_external_node_map, node_based_edge_list,
BuildEdgeExpandedGraph(*internal_to_external_node_map, edge_based_edge_list);
*node_based_edge_list, edge_based_edge_list);
auto number_of_node_based_nodes = graph_size.first; auto number_of_node_based_nodes = graph_size.first;
auto number_of_edge_based_nodes = graph_size.second; auto max_edge_id = graph_size.second;
TIMER_STOP(expansion); TIMER_STOP(expansion);
SimpleLogger().Write() << "building r-tree ..."; SimpleLogger().Write() << "building r-tree ...";
TIMER_START(rtree); TIMER_START(rtree);
BuildRTree(*node_based_edge_list, *internal_to_external_node_map); FindComponents(max_edge_id, edge_based_edge_list, node_based_edge_list);
BuildRTree(node_based_edge_list, internal_to_external_node_map);
TIMER_STOP(rtree); TIMER_STOP(rtree);
SimpleLogger().Write() << "writing node map ..."; SimpleLogger().Write() << "writing node map ...";
WriteNodeMapping(std::move(internal_to_external_node_map)); WriteNodeMapping(internal_to_external_node_map);
// Contracting the edge-expanded graph // Contracting the edge-expanded graph
TIMER_START(contraction); TIMER_START(contraction);
auto contracted_edge_list = osrm::make_unique<DeallocatingVector<QueryEdge>>(); std::vector<bool> is_core_node;
ContractGraph(number_of_edge_based_nodes, edge_based_edge_list, *contracted_edge_list); DeallocatingVector<QueryEdge> contracted_edge_list;
ContractGraph(max_edge_id, edge_based_edge_list, contracted_edge_list, is_core_node);
TIMER_STOP(contraction); TIMER_STOP(contraction);
SimpleLogger().Write() << "Contraction took " << TIMER_SEC(contraction) << " sec"; SimpleLogger().Write() << "Contraction took " << TIMER_SEC(contraction) << " sec";
std::size_t number_of_used_edges = WriteContractedGraph(number_of_edge_based_nodes, std::size_t number_of_used_edges =
std::move(node_based_edge_list), WriteContractedGraph(max_edge_id, node_based_edge_list, contracted_edge_list);
std::move(contracted_edge_list)); WriteCoreNodeMarker(std::move(is_core_node));
TIMER_STOP(preparing); TIMER_STOP(preparing);
SimpleLogger().Write() << "Preprocessing : " << TIMER_SEC(preparing) << " seconds"; SimpleLogger().Write() << "Preprocessing : " << TIMER_SEC(preparing) << " seconds";
SimpleLogger().Write() << "Expansion : " << (number_of_node_based_nodes / TIMER_SEC(expansion)) SimpleLogger().Write() << "Expansion : " << (number_of_node_based_nodes / TIMER_SEC(expansion))
<< " nodes/sec and " << " nodes/sec and " << ((max_edge_id + 1) / TIMER_SEC(expansion))
<< (number_of_edge_based_nodes / TIMER_SEC(expansion)) << " edges/sec"; << " edges/sec";
SimpleLogger().Write() << "Contraction: " SimpleLogger().Write() << "Contraction: " << ((max_edge_id + 1) / TIMER_SEC(contraction))
<< (number_of_edge_based_nodes / TIMER_SEC(contraction))
<< " nodes/sec and " << number_of_used_edges / TIMER_SEC(contraction) << " nodes/sec and " << number_of_used_edges / TIMER_SEC(contraction)
<< " edges/sec"; << " edges/sec";
@ -129,25 +131,117 @@ int Prepare::Run()
return 0; return 0;
} }
std::size_t Prepare::WriteContractedGraph(unsigned number_of_edge_based_nodes, void Prepare::FindComponents(unsigned max_edge_id,
std::unique_ptr<std::vector<EdgeBasedNode>> node_based_edge_list, const DeallocatingVector<EdgeBasedEdge> &input_edge_list,
std::unique_ptr<DeallocatingVector<QueryEdge>> contracted_edge_list) std::vector<EdgeBasedNode> &input_nodes) const
{ {
const unsigned crc32_value = CalculateEdgeChecksum(std::move(node_based_edge_list)); struct UncontractedEdgeData
{
};
struct InputEdge
{
unsigned source;
unsigned target;
UncontractedEdgeData data;
bool operator<(const InputEdge &rhs) const
{
return source < rhs.source || (source == rhs.source && target < rhs.target);
}
bool operator==(const InputEdge &rhs) const
{
return source == rhs.source && target == rhs.target;
}
};
using UncontractedGraph = StaticGraph<UncontractedEdgeData>;
std::vector<InputEdge> edges;
edges.reserve(input_edge_list.size() * 2);
for (const auto &edge : input_edge_list)
{
BOOST_ASSERT_MSG(static_cast<unsigned int>(std::max(edge.weight, 1)) > 0,
"edge distance < 1");
if (edge.forward)
{
edges.push_back({edge.source, edge.target, {}});
}
if (edge.backward)
{
edges.push_back({edge.target, edge.source, {}});
}
}
// connect forward and backward nodes of each edge
for (const auto &node : input_nodes)
{
if (node.reverse_edge_based_node_id != SPECIAL_NODEID)
{
edges.push_back({node.forward_edge_based_node_id, node.reverse_edge_based_node_id, {}});
edges.push_back({node.reverse_edge_based_node_id, node.forward_edge_based_node_id, {}});
}
}
tbb::parallel_sort(edges.begin(), edges.end());
auto new_end = std::unique(edges.begin(), edges.end());
edges.resize(new_end - edges.begin());
auto uncontractor_graph = std::make_shared<UncontractedGraph>(max_edge_id + 1, edges);
TarjanSCC<UncontractedGraph> component_search(
std::const_pointer_cast<const UncontractedGraph>(uncontractor_graph));
component_search.run();
for (auto &node : input_nodes)
{
auto forward_component = component_search.get_component_id(node.forward_edge_based_node_id);
BOOST_ASSERT(node.reverse_edge_based_node_id == SPECIAL_EDGEID ||
forward_component ==
component_search.get_component_id(node.reverse_edge_based_node_id));
const unsigned component_size = component_search.get_component_size(forward_component);
const bool is_tiny_component = component_size < 1000;
node.component_id = is_tiny_component ? (1 + forward_component) : 0;
}
}
void Prepare::WriteCoreNodeMarker(std::vector<bool> &&in_is_core_node) const
{
std::vector<bool> is_core_node(in_is_core_node);
std::vector<char> unpacked_bool_flags(is_core_node.size());
for (auto i = 0u; i < is_core_node.size(); ++i)
{
unpacked_bool_flags[i] = is_core_node[i] ? 1 : 0;
}
boost::filesystem::ofstream core_marker_output_stream(config.core_output_path,
std::ios::binary);
unsigned size = unpacked_bool_flags.size();
core_marker_output_stream.write((char *)&size, sizeof(unsigned));
core_marker_output_stream.write((char *)unpacked_bool_flags.data(),
sizeof(char) * unpacked_bool_flags.size());
}
std::size_t Prepare::WriteContractedGraph(unsigned max_node_id,
const std::vector<EdgeBasedNode> &node_based_edge_list,
const DeallocatingVector<QueryEdge> &contracted_edge_list)
{
const unsigned crc32_value = CalculateEdgeChecksum(node_based_edge_list);
// Sorting contracted edges in a way that the static query graph can read some in in-place. // Sorting contracted edges in a way that the static query graph can read some in in-place.
tbb::parallel_sort(contracted_edge_list->begin(), contracted_edge_list->end()); tbb::parallel_sort(contracted_edge_list.begin(), contracted_edge_list.end());
const unsigned contracted_edge_count = contracted_edge_list->size(); const unsigned contracted_edge_count = contracted_edge_list.size();
SimpleLogger().Write() << "Serializing compacted graph of " << contracted_edge_count SimpleLogger().Write() << "Serializing compacted graph of " << contracted_edge_count
<< " edges"; << " edges";
const FingerPrint fingerprint = FingerPrint::GetValid(); const FingerPrint fingerprint = FingerPrint::GetValid();
boost::filesystem::ofstream hsgr_output_stream(config.graph_output_path, std::ios::binary); boost::filesystem::ofstream hsgr_output_stream(config.graph_output_path, std::ios::binary);
hsgr_output_stream.write((char *)&fingerprint, sizeof(FingerPrint)); hsgr_output_stream.write((char *)&fingerprint, sizeof(FingerPrint));
const unsigned max_used_node_id = 1 + [&contracted_edge_list] const unsigned max_used_node_id = [&contracted_edge_list]
{ {
unsigned tmp_max = 0; unsigned tmp_max = 0;
for (const QueryEdge &edge : *contracted_edge_list) for (const QueryEdge &edge : contracted_edge_list)
{ {
BOOST_ASSERT(SPECIAL_NODEID != edge.source); BOOST_ASSERT(SPECIAL_NODEID != edge.source);
BOOST_ASSERT(SPECIAL_NODEID != edge.target); BOOST_ASSERT(SPECIAL_NODEID != edge.target);
@ -157,22 +251,23 @@ std::size_t Prepare::WriteContractedGraph(unsigned number_of_edge_based_nodes,
return tmp_max; return tmp_max;
}(); }();
SimpleLogger().Write(logDEBUG) << "input graph has " << number_of_edge_based_nodes << " nodes"; SimpleLogger().Write(logDEBUG) << "input graph has " << (max_node_id + 1) << " nodes";
SimpleLogger().Write(logDEBUG) << "contracted graph has " << max_used_node_id << " nodes"; SimpleLogger().Write(logDEBUG) << "contracted graph has " << (max_used_node_id + 1) << " nodes";
std::vector<StaticGraph<EdgeData>::NodeArrayEntry> node_array; std::vector<StaticGraph<EdgeData>::NodeArrayEntry> node_array;
node_array.resize(number_of_edge_based_nodes + 1); // make sure we have at least one sentinel
node_array.resize(max_node_id + 2);
SimpleLogger().Write() << "Building node array"; SimpleLogger().Write() << "Building node array";
StaticGraph<EdgeData>::EdgeIterator edge = 0; StaticGraph<EdgeData>::EdgeIterator edge = 0;
StaticGraph<EdgeData>::EdgeIterator position = 0; StaticGraph<EdgeData>::EdgeIterator position = 0;
StaticGraph<EdgeData>::EdgeIterator last_edge = edge; StaticGraph<EdgeData>::EdgeIterator last_edge;
// initializing 'first_edge'-field of nodes: // initializing 'first_edge'-field of nodes:
for (const auto node : osrm::irange(0u, max_used_node_id)) for (const auto node : osrm::irange(0u, max_used_node_id + 1))
{ {
last_edge = edge; last_edge = edge;
while ((edge < contracted_edge_count) && ((*contracted_edge_list)[edge].source == node)) while ((edge < contracted_edge_count) && (contracted_edge_list[edge].source == node))
{ {
++edge; ++edge;
} }
@ -180,7 +275,8 @@ std::size_t Prepare::WriteContractedGraph(unsigned number_of_edge_based_nodes,
position += edge - last_edge; // remove position += edge - last_edge; // remove
} }
for (const auto sentinel_counter : osrm::irange<unsigned>(max_used_node_id, node_array.size())) for (const auto sentinel_counter :
osrm::irange<unsigned>(max_used_node_id + 1, node_array.size()))
{ {
// sentinel element, guarded against underflow // sentinel element, guarded against underflow
node_array[sentinel_counter].first_edge = contracted_edge_count; node_array[sentinel_counter].first_edge = contracted_edge_count;
@ -204,29 +300,28 @@ std::size_t Prepare::WriteContractedGraph(unsigned number_of_edge_based_nodes,
// serialize all edges // serialize all edges
SimpleLogger().Write() << "Building edge array"; SimpleLogger().Write() << "Building edge array";
edge = 0;
int number_of_used_edges = 0; int number_of_used_edges = 0;
StaticGraph<EdgeData>::EdgeArrayEntry current_edge; StaticGraph<EdgeData>::EdgeArrayEntry current_edge;
for (const auto edge : osrm::irange<std::size_t>(0, contracted_edge_list->size())) for (const auto edge : osrm::irange<std::size_t>(0, contracted_edge_list.size()))
{ {
// no eigen loops // no eigen loops
BOOST_ASSERT((*contracted_edge_list)[edge].source != (*contracted_edge_list)[edge].target); BOOST_ASSERT(contracted_edge_list[edge].source != contracted_edge_list[edge].target);
current_edge.target = (*contracted_edge_list)[edge].target; current_edge.target = contracted_edge_list[edge].target;
current_edge.data = (*contracted_edge_list)[edge].data; current_edge.data = contracted_edge_list[edge].data;
// every target needs to be valid // every target needs to be valid
BOOST_ASSERT(current_edge.target < max_used_node_id); BOOST_ASSERT(current_edge.target <= max_used_node_id);
#ifndef NDEBUG #ifndef NDEBUG
if (current_edge.data.distance <= 0) if (current_edge.data.distance <= 0)
{ {
SimpleLogger().Write(logWARNING) << "Edge: " << edge SimpleLogger().Write(logWARNING) << "Edge: " << edge
<< ",source: " << (*contracted_edge_list)[edge].source << ",source: " << contracted_edge_list[edge].source
<< ", target: " << (*contracted_edge_list)[edge].target << ", target: " << contracted_edge_list[edge].target
<< ", dist: " << current_edge.data.distance; << ", dist: " << current_edge.data.distance;
SimpleLogger().Write(logWARNING) << "Failed at adjacency list of node " SimpleLogger().Write(logWARNING) << "Failed at adjacency list of node "
<< (*contracted_edge_list)[edge].source << "/" << contracted_edge_list[edge].source << "/"
<< node_array.size() - 1; << node_array.size() - 1;
return 1; return 1;
} }
@ -240,7 +335,7 @@ std::size_t Prepare::WriteContractedGraph(unsigned number_of_edge_based_nodes,
return number_of_used_edges; return number_of_used_edges;
} }
unsigned Prepare::CalculateEdgeChecksum(std::unique_ptr<std::vector<EdgeBasedNode>> node_based_edge_list) unsigned Prepare::CalculateEdgeChecksum(const std::vector<EdgeBasedNode> &node_based_edge_list)
{ {
RangebasedCRC32 crc32; RangebasedCRC32 crc32;
if (crc32.using_hardware()) if (crc32.using_hardware())
@ -252,7 +347,7 @@ unsigned Prepare::CalculateEdgeChecksum(std::unique_ptr<std::vector<EdgeBasedNod
SimpleLogger().Write() << "using software based CRC32 computation"; SimpleLogger().Write() << "using software based CRC32 computation";
} }
const unsigned crc32_value = crc32(*node_based_edge_list); const unsigned crc32_value = crc32(node_based_edge_list);
SimpleLogger().Write() << "CRC32: " << crc32_value; SimpleLogger().Write() << "CRC32: " << crc32_value;
return crc32_value; return crc32_value;
@ -262,8 +357,7 @@ unsigned Prepare::CalculateEdgeChecksum(std::unique_ptr<std::vector<EdgeBasedNod
\brief Setups scripting environment (lua-scripting) \brief Setups scripting environment (lua-scripting)
Also initializes speed profile. Also initializes speed profile.
*/ */
void Prepare::SetupScriptingEnvironment( void Prepare::SetupScriptingEnvironment(lua_State *lua_state, SpeedProfileProperties &speed_profile)
lua_State *lua_state, EdgeBasedGraphFactory::SpeedProfileProperties &speed_profile)
{ {
// open utility libraries string library; // open utility libraries string library;
luaL_openlibs(lua_state); luaL_openlibs(lua_state);
@ -305,7 +399,8 @@ void Prepare::SetupScriptingEnvironment(
*/ */
std::shared_ptr<RestrictionMap> Prepare::LoadRestrictionMap() std::shared_ptr<RestrictionMap> Prepare::LoadRestrictionMap()
{ {
boost::filesystem::ifstream input_stream(config.restrictions_path, std::ios::in | std::ios::binary); boost::filesystem::ifstream input_stream(config.restrictions_path,
std::ios::in | std::ios::binary);
std::vector<TurnRestriction> restriction_list; std::vector<TurnRestriction> restriction_list;
loadRestrictionsFromFile(input_stream, restriction_list); loadRestrictionsFromFile(input_stream, restriction_list);
@ -319,21 +414,32 @@ std::shared_ptr<RestrictionMap> Prepare::LoadRestrictionMap()
\brief Load node based graph from .osrm file \brief Load node based graph from .osrm file
*/ */
std::shared_ptr<NodeBasedDynamicGraph> std::shared_ptr<NodeBasedDynamicGraph>
Prepare::LoadNodeBasedGraph(std::vector<NodeID> &barrier_node_list, Prepare::LoadNodeBasedGraph(std::unordered_set<NodeID> &barrier_nodes,
std::vector<NodeID> &traffic_light_list, std::unordered_set<NodeID> &traffic_lights,
std::vector<QueryNode>& internal_to_external_node_map) std::vector<QueryNode> &internal_to_external_node_map)
{ {
std::vector<NodeBasedEdge> edge_list; std::vector<NodeBasedEdge> edge_list;
boost::filesystem::ifstream input_stream(config.osrm_input_path, std::ios::in | std::ios::binary); boost::filesystem::ifstream input_stream(config.osrm_input_path,
std::ios::in | std::ios::binary);
NodeID number_of_node_based_nodes = loadNodesFromFile(input_stream, std::vector<NodeID> barrier_list;
barrier_node_list, traffic_light_list, std::vector<NodeID> traffic_light_list;
internal_to_external_node_map); NodeID number_of_node_based_nodes = loadNodesFromFile(
input_stream, barrier_list, traffic_light_list, internal_to_external_node_map);
SimpleLogger().Write() << " - " << barrier_node_list.size() << " bollard nodes, " SimpleLogger().Write() << " - " << barrier_list.size() << " bollard nodes, "
<< traffic_light_list.size() << " traffic lights"; << traffic_light_list.size() << " traffic lights";
// insert into unordered sets for fast lookup
barrier_nodes.insert(barrier_list.begin(), barrier_list.end());
traffic_lights.insert(traffic_light_list.begin(), traffic_light_list.end());
barrier_list.clear();
barrier_list.shrink_to_fit();
traffic_light_list.clear();
traffic_light_list.shrink_to_fit();
loadEdgesFromFile(input_stream, edge_list); loadEdgesFromFile(input_stream, edge_list);
if (edge_list.empty()) if (edge_list.empty())
@ -342,10 +448,9 @@ Prepare::LoadNodeBasedGraph(std::vector<NodeID> &barrier_node_list,
return std::shared_ptr<NodeBasedDynamicGraph>(); return std::shared_ptr<NodeBasedDynamicGraph>();
} }
return NodeBasedDynamicGraphFromImportEdges(number_of_node_based_nodes, edge_list); return NodeBasedDynamicGraphFromEdges(number_of_node_based_nodes, edge_list);
} }
/** /**
\brief Building an edge-expanded graph from node-based input and turn restrictions \brief Building an edge-expanded graph from node-based input and turn restrictions
*/ */
@ -357,62 +462,64 @@ Prepare::BuildEdgeExpandedGraph(std::vector<QueryNode> &internal_to_external_nod
lua_State *lua_state = luaL_newstate(); lua_State *lua_state = luaL_newstate();
luabind::open(lua_state); luabind::open(lua_state);
EdgeBasedGraphFactory::SpeedProfileProperties speed_profile; SpeedProfileProperties speed_profile;
SetupScriptingEnvironment(lua_state, speed_profile); SetupScriptingEnvironment(lua_state, speed_profile);
auto barrier_node_list = osrm::make_unique<std::vector<NodeID>>(); std::unordered_set<NodeID> barrier_nodes;
auto traffic_light_list = osrm::make_unique<std::vector<NodeID>>(); std::unordered_set<NodeID> traffic_lights;
auto restriction_map = LoadRestrictionMap(); auto restriction_map = LoadRestrictionMap();
auto node_based_graph = LoadNodeBasedGraph(*barrier_node_list, *traffic_light_list, internal_to_external_node_map); auto node_based_graph =
LoadNodeBasedGraph(barrier_nodes, traffic_lights, internal_to_external_node_map);
const std::size_t number_of_node_based_nodes = node_based_graph->GetNumberOfNodes(); CompressedEdgeContainer compressed_edge_container;
GraphCompressor graph_compressor(speed_profile);
graph_compressor.Compress(barrier_nodes, traffic_lights, *restriction_map, *node_based_graph,
compressed_edge_container);
EdgeBasedGraphFactory edge_based_graph_factory(node_based_graph, EdgeBasedGraphFactory edge_based_graph_factory(
restriction_map, node_based_graph, compressed_edge_container, barrier_nodes, traffic_lights,
std::move(barrier_node_list), std::const_pointer_cast<RestrictionMap const>(restriction_map),
std::move(traffic_light_list), internal_to_external_node_map, speed_profile);
internal_to_external_node_map,
speed_profile);
edge_based_graph_factory.Run(config.edge_output_path, config.geometry_output_path, lua_state); compressed_edge_container.SerializeInternalVector(config.geometry_output_path);
edge_based_graph_factory.Run(config.edge_output_path, lua_state);
lua_close(lua_state); lua_close(lua_state);
const std::size_t number_of_edge_based_nodes =
edge_based_graph_factory.GetNumberOfEdgeBasedNodes();
BOOST_ASSERT(number_of_edge_based_nodes != std::numeric_limits<unsigned>::max());
edge_based_graph_factory.GetEdgeBasedEdges(edge_based_edge_list); edge_based_graph_factory.GetEdgeBasedEdges(edge_based_edge_list);
edge_based_graph_factory.GetEdgeBasedNodes(node_based_edge_list); edge_based_graph_factory.GetEdgeBasedNodes(node_based_edge_list);
auto max_edge_id = edge_based_graph_factory.GetHighestEdgeID();
return std::make_pair(number_of_node_based_nodes, number_of_edge_based_nodes); const std::size_t number_of_node_based_nodes = node_based_graph->GetNumberOfNodes();
return std::make_pair(number_of_node_based_nodes, max_edge_id);
} }
/** /**
\brief Build contracted graph. \brief Build contracted graph.
*/ */
void Prepare::ContractGraph(const std::size_t number_of_edge_based_nodes, void Prepare::ContractGraph(const unsigned max_edge_id,
DeallocatingVector<EdgeBasedEdge>& edge_based_edge_list, DeallocatingVector<EdgeBasedEdge> &edge_based_edge_list,
DeallocatingVector<QueryEdge>& contracted_edge_list) DeallocatingVector<QueryEdge> &contracted_edge_list,
std::vector<bool> &is_core_node)
{ {
Contractor contractor(number_of_edge_based_nodes, edge_based_edge_list); Contractor contractor(max_edge_id + 1, edge_based_edge_list);
contractor.Run(); contractor.Run(config.core_factor);
contractor.GetEdges(contracted_edge_list); contractor.GetEdges(contracted_edge_list);
contractor.GetCoreMarker(is_core_node);
} }
/** /**
\brief Writing info on original (node-based) nodes \brief Writing info on original (node-based) nodes
*/ */
void Prepare::WriteNodeMapping(std::unique_ptr<std::vector<QueryNode>> internal_to_external_node_map) void Prepare::WriteNodeMapping(const std::vector<QueryNode> &internal_to_external_node_map)
{ {
boost::filesystem::ofstream node_stream(config.node_output_path, std::ios::binary); boost::filesystem::ofstream node_stream(config.node_output_path, std::ios::binary);
const unsigned size_of_mapping = internal_to_external_node_map->size(); const unsigned size_of_mapping = internal_to_external_node_map.size();
node_stream.write((char *)&size_of_mapping, sizeof(unsigned)); node_stream.write((char *)&size_of_mapping, sizeof(unsigned));
if (size_of_mapping > 0) if (size_of_mapping > 0)
{ {
node_stream.write((char *) internal_to_external_node_map->data(), node_stream.write((char *)internal_to_external_node_map.data(),
size_of_mapping * sizeof(QueryNode)); size_of_mapping * sizeof(QueryNode));
} }
node_stream.close(); node_stream.close();
@ -423,8 +530,10 @@ void Prepare::WriteNodeMapping(std::unique_ptr<std::vector<QueryNode>> internal_
Saves tree into '.ramIndex' and leaves into '.fileIndex'. Saves tree into '.ramIndex' and leaves into '.fileIndex'.
*/ */
void Prepare::BuildRTree(const std::vector<EdgeBasedNode> &node_based_edge_list, const std::vector<QueryNode>& internal_to_external_node_map) void Prepare::BuildRTree(const std::vector<EdgeBasedNode> &node_based_edge_list,
const std::vector<QueryNode> &internal_to_external_node_map)
{ {
StaticRTree<EdgeBasedNode>(node_based_edge_list, config.rtree_nodes_output_path.c_str(), StaticRTree<EdgeBasedNode>(node_based_edge_list, config.rtree_nodes_output_path.c_str(),
config.rtree_leafs_output_path.c_str(), internal_to_external_node_map); config.rtree_leafs_output_path.c_str(),
internal_to_external_node_map);
} }

View File

@ -33,6 +33,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../data_structures/query_edge.hpp" #include "../data_structures/query_edge.hpp"
#include "../data_structures/static_graph.hpp" #include "../data_structures/static_graph.hpp"
struct SpeedProfileProperties;
struct EdgeBasedNode; struct EdgeBasedNode;
struct lua_State; struct lua_State;
@ -50,34 +51,39 @@ class Prepare
using InputEdge = DynamicGraph<EdgeData>::InputEdge; using InputEdge = DynamicGraph<EdgeData>::InputEdge;
using StaticEdge = StaticGraph<EdgeData>::InputEdge; using StaticEdge = StaticGraph<EdgeData>::InputEdge;
explicit Prepare(const ContractorConfig& contractor_config) explicit Prepare(ContractorConfig contractor_config) : config(std::move(contractor_config)) {}
: config(contractor_config) {}
Prepare(const Prepare &) = delete; Prepare(const Prepare &) = delete;
~Prepare(); ~Prepare();
int Run(); int Run();
protected: protected:
void SetupScriptingEnvironment(lua_State *myLuaState, void SetupScriptingEnvironment(lua_State *myLuaState, SpeedProfileProperties &speed_profile);
EdgeBasedGraphFactory::SpeedProfileProperties &speed_profile); unsigned CalculateEdgeChecksum(const std::vector<EdgeBasedNode> &node_based_edge_list);
std::shared_ptr<RestrictionMap> LoadRestrictionMap(); void ContractGraph(const unsigned max_edge_id,
unsigned CalculateEdgeChecksum(std::unique_ptr<std::vector<EdgeBasedNode>> node_based_edge_list); DeallocatingVector<EdgeBasedEdge> &edge_based_edge_list,
void ContractGraph(const std::size_t number_of_edge_based_nodes, DeallocatingVector<QueryEdge> &contracted_edge_list,
DeallocatingVector<EdgeBasedEdge>& edge_based_edge_list, std::vector<bool> &is_core_node);
DeallocatingVector<QueryEdge>& contracted_edge_list); void WriteCoreNodeMarker(std::vector<bool> &&is_core_node) const;
std::size_t WriteContractedGraph(unsigned number_of_edge_based_nodes, std::size_t WriteContractedGraph(unsigned number_of_edge_based_nodes,
std::unique_ptr<std::vector<EdgeBasedNode>> node_based_edge_list, const std::vector<EdgeBasedNode> &node_based_edge_list,
std::unique_ptr<DeallocatingVector<QueryEdge>> contracted_edge_list); const DeallocatingVector<QueryEdge> &contracted_edge_list);
std::shared_ptr<NodeBasedDynamicGraph> LoadNodeBasedGraph(std::vector<NodeID> &barrier_node_list, std::shared_ptr<RestrictionMap> LoadRestrictionMap();
std::vector<NodeID> &traffic_light_list, std::shared_ptr<NodeBasedDynamicGraph>
std::vector<QueryNode>& internal_to_external_node_map); LoadNodeBasedGraph(std::unordered_set<NodeID> &barrier_nodes,
std::unordered_set<NodeID> &traffic_lights,
std::vector<QueryNode> &internal_to_external_node_map);
std::pair<std::size_t, std::size_t> std::pair<std::size_t, std::size_t>
BuildEdgeExpandedGraph(std::vector<QueryNode> &internal_to_external_node_map, BuildEdgeExpandedGraph(std::vector<QueryNode> &internal_to_external_node_map,
std::vector<EdgeBasedNode> &node_based_edge_list, std::vector<EdgeBasedNode> &node_based_edge_list,
DeallocatingVector<EdgeBasedEdge> &edge_based_edge_list); DeallocatingVector<EdgeBasedEdge> &edge_based_edge_list);
void WriteNodeMapping(std::unique_ptr<std::vector<QueryNode>> internal_to_external_node_map); void WriteNodeMapping(const std::vector<QueryNode> &internal_to_external_node_map);
void FindComponents(unsigned max_edge_id,
const DeallocatingVector<EdgeBasedEdge> &edges,
std::vector<EdgeBasedNode> &nodes) const;
void BuildRTree(const std::vector<EdgeBasedNode> &node_based_edge_list, void BuildRTree(const std::vector<EdgeBasedNode> &node_based_edge_list,
const std::vector<QueryNode> &internal_to_external_node_map); const std::vector<QueryNode> &internal_to_external_node_map);
private: private:
ContractorConfig config; ContractorConfig config;
}; };

View File

@ -0,0 +1,16 @@
#ifndef SPEED_PROFILE_PROPERTIES_HPP
#define SPEED_PROFILE_PROPERTIES_HPP
struct SpeedProfileProperties
{
SpeedProfileProperties()
: traffic_signal_penalty(0), u_turn_penalty(0), has_turn_penalty_function(false)
{
}
int traffic_signal_penalty;
int u_turn_penalty;
bool has_turn_penalty_function;
};
#endif

View File

@ -189,6 +189,11 @@ class BinaryHeap
return inserted_nodes[heap[1].index].node; return inserted_nodes[heap[1].index].node;
} }
Weight MinKey() const {
BOOST_ASSERT(heap.size() > 1);
return heap[1].weight;
}
NodeID DeleteMin() NodeID DeleteMin()
{ {
BOOST_ASSERT(heap.size() > 1); BOOST_ASSERT(heap.size() > 1);
@ -207,7 +212,7 @@ class BinaryHeap
void DeleteAll() void DeleteAll()
{ {
auto iend = heap.end(); auto iend = heap.end();
for (typename std::vector<HeapElement>::iterator i = heap.begin() + 1; i != iend; ++i) for (auto i = heap.begin() + 1; i != iend; ++i)
{ {
inserted_nodes[i->index].key = 0; inserted_nodes[i->index].key = 0;
} }
@ -232,7 +237,9 @@ class BinaryHeap
class HeapNode class HeapNode
{ {
public: public:
HeapNode(NodeID n, Key k, Weight w, Data d) : node(n), key(k), weight(w), data(d) {} HeapNode(NodeID n, Key k, Weight w, Data d) : node(n), key(k), weight(w), data(std::move(d))
{
}
NodeID node; NodeID node;
Key key; Key key;

View File

@ -25,7 +25,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#include "geometry_compressor.hpp" #include "compressed_edge_container.hpp"
#include "../util/simple_logger.hpp" #include "../util/simple_logger.hpp"
#include <boost/assert.hpp> #include <boost/assert.hpp>
@ -35,13 +35,15 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <limits> #include <limits>
#include <string> #include <string>
GeometryCompressor::GeometryCompressor() #include <iostream>
CompressedEdgeContainer::CompressedEdgeContainer()
{ {
m_free_list.reserve(100); m_free_list.reserve(100);
IncreaseFreeList(); IncreaseFreeList();
} }
void GeometryCompressor::IncreaseFreeList() void CompressedEdgeContainer::IncreaseFreeList()
{ {
m_compressed_geometries.resize(m_compressed_geometries.size() + 100); m_compressed_geometries.resize(m_compressed_geometries.size() + 100);
for (unsigned i = 100; i > 0; --i) for (unsigned i = 100; i > 0; --i)
@ -51,13 +53,13 @@ void GeometryCompressor::IncreaseFreeList()
} }
} }
bool GeometryCompressor::HasEntryForID(const EdgeID edge_id) const bool CompressedEdgeContainer::HasEntryForID(const EdgeID edge_id) const
{ {
auto iter = m_edge_id_to_list_index_map.find(edge_id); auto iter = m_edge_id_to_list_index_map.find(edge_id);
return iter != m_edge_id_to_list_index_map.end(); return iter != m_edge_id_to_list_index_map.end();
} }
unsigned GeometryCompressor::GetPositionForID(const EdgeID edge_id) const unsigned CompressedEdgeContainer::GetPositionForID(const EdgeID edge_id) const
{ {
auto map_iterator = m_edge_id_to_list_index_map.find(edge_id); auto map_iterator = m_edge_id_to_list_index_map.find(edge_id);
BOOST_ASSERT(map_iterator != m_edge_id_to_list_index_map.end()); BOOST_ASSERT(map_iterator != m_edge_id_to_list_index_map.end());
@ -65,7 +67,7 @@ unsigned GeometryCompressor::GetPositionForID(const EdgeID edge_id) const
return map_iterator->second; return map_iterator->second;
} }
void GeometryCompressor::SerializeInternalVector(const std::string &path) const void CompressedEdgeContainer::SerializeInternalVector(const std::string &path) const
{ {
boost::filesystem::fstream geometry_out_stream(path, std::ios::binary | std::ios::out); boost::filesystem::fstream geometry_out_stream(path, std::ios::binary | std::ios::out);
@ -108,7 +110,7 @@ void GeometryCompressor::SerializeInternalVector(const std::string &path) const
geometry_out_stream.close(); geometry_out_stream.close();
} }
void GeometryCompressor::CompressEdge(const EdgeID edge_id_1, void CompressedEdgeContainer::CompressEdge(const EdgeID edge_id_1,
const EdgeID edge_id_2, const EdgeID edge_id_2,
const NodeID via_node_id, const NodeID via_node_id,
const NodeID target_node_id, const NodeID target_node_id,
@ -153,6 +155,8 @@ void GeometryCompressor::CompressEdge(const EdgeID edge_id_1,
std::vector<CompressedNode> &edge_bucket_list1 = m_compressed_geometries[edge_bucket_id1]; std::vector<CompressedNode> &edge_bucket_list1 = m_compressed_geometries[edge_bucket_id1];
// note we don't save the start coordinate: it is implicitly given by edge 1
// weight1 is the distance to the (currently) last coordinate in the bucket
if (edge_bucket_list1.empty()) if (edge_bucket_list1.empty())
{ {
edge_bucket_list1.emplace_back(via_node_id, weight1); edge_bucket_list1.emplace_back(via_node_id, weight1);
@ -190,7 +194,7 @@ void GeometryCompressor::CompressEdge(const EdgeID edge_id_1,
} }
} }
void GeometryCompressor::PrintStatistics() const void CompressedEdgeContainer::PrintStatistics() const
{ {
const uint64_t compressed_edges = m_compressed_geometries.size(); const uint64_t compressed_edges = m_compressed_geometries.size();
BOOST_ASSERT(0 == compressed_edges % 2); BOOST_ASSERT(0 == compressed_edges % 2);
@ -215,20 +219,20 @@ void GeometryCompressor::PrintStatistics() const
std::max((uint64_t)1, compressed_edges); std::max((uint64_t)1, compressed_edges);
} }
const std::vector<GeometryCompressor::CompressedNode> & const CompressedEdgeContainer::EdgeBucket&
GeometryCompressor::GetBucketReference(const EdgeID edge_id) const CompressedEdgeContainer::GetBucketReference(const EdgeID edge_id) const
{ {
const unsigned index = m_edge_id_to_list_index_map.at(edge_id); const unsigned index = m_edge_id_to_list_index_map.at(edge_id);
return m_compressed_geometries.at(index); return m_compressed_geometries.at(index);
} }
NodeID GeometryCompressor::GetFirstNodeIDOfBucket(const EdgeID edge_id) const NodeID CompressedEdgeContainer::GetFirstEdgeTargetID(const EdgeID edge_id) const
{ {
const auto &bucket = GetBucketReference(edge_id); const auto &bucket = GetBucketReference(edge_id);
BOOST_ASSERT(bucket.size() >= 2); BOOST_ASSERT(bucket.size() >= 2);
return bucket[1].first; return bucket.front().first;
} }
NodeID GeometryCompressor::GetLastNodeIDOfBucket(const EdgeID edge_id) const NodeID CompressedEdgeContainer::GetLastEdgeSourceID(const EdgeID edge_id) const
{ {
const auto &bucket = GetBucketReference(edge_id); const auto &bucket = GetBucketReference(edge_id);
BOOST_ASSERT(bucket.size() >= 2); BOOST_ASSERT(bucket.size() >= 2);

View File

@ -35,12 +35,13 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <string> #include <string>
#include <vector> #include <vector>
class GeometryCompressor class CompressedEdgeContainer
{ {
public: public:
using CompressedNode = std::pair<NodeID, EdgeWeight>; using CompressedNode = std::pair<NodeID, EdgeWeight>;
using EdgeBucket = std::vector<CompressedNode>;
GeometryCompressor(); CompressedEdgeContainer();
void CompressEdge(const EdgeID surviving_edge_id, void CompressEdge(const EdgeID surviving_edge_id,
const EdgeID removed_edge_id, const EdgeID removed_edge_id,
const NodeID via_node_id, const NodeID via_node_id,
@ -52,16 +53,15 @@ class GeometryCompressor
void PrintStatistics() const; void PrintStatistics() const;
void SerializeInternalVector(const std::string &path) const; void SerializeInternalVector(const std::string &path) const;
unsigned GetPositionForID(const EdgeID edge_id) const; unsigned GetPositionForID(const EdgeID edge_id) const;
const std::vector<GeometryCompressor::CompressedNode> & const EdgeBucket& GetBucketReference(const EdgeID edge_id) const;
GetBucketReference(const EdgeID edge_id) const; NodeID GetFirstEdgeTargetID(const EdgeID edge_id) const;
NodeID GetFirstNodeIDOfBucket(const EdgeID edge_id) const; NodeID GetLastEdgeSourceID(const EdgeID edge_id) const;
NodeID GetLastNodeIDOfBucket(const EdgeID edge_id) const;
private: private:
int free_list_maximum = 0; int free_list_maximum = 0;
void IncreaseFreeList(); void IncreaseFreeList();
std::vector<std::vector<CompressedNode>> m_compressed_geometries; std::vector<EdgeBucket> m_compressed_geometries;
std::vector<unsigned> m_free_list; std::vector<unsigned> m_free_list;
std::unordered_map<EdgeID, unsigned> m_edge_id_to_list_index_map; std::unordered_map<EdgeID, unsigned> m_edge_id_to_list_index_map;
}; };

View File

@ -25,7 +25,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ */
#include "coordinate_calculation.hpp" #include "../algorithms/coordinate_calculation.hpp"
#ifndef NDEBUG #ifndef NDEBUG
#include "../util/simple_logger.hpp" #include "../util/simple_logger.hpp"

View File

@ -36,6 +36,32 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <utility> #include <utility>
#include <vector> #include <vector>
template <typename ElementT> struct ConstDeallocatingVectorIteratorState
{
ConstDeallocatingVectorIteratorState()
: index(std::numeric_limits<std::size_t>::max()), bucket_list(nullptr)
{
}
explicit ConstDeallocatingVectorIteratorState(const ConstDeallocatingVectorIteratorState &r)
: index(r.index), bucket_list(r.bucket_list)
{
}
explicit ConstDeallocatingVectorIteratorState(const std::size_t idx,
const std::vector<ElementT *> *input_list)
: index(idx), bucket_list(input_list)
{
}
std::size_t index;
const std::vector<ElementT *> *bucket_list;
ConstDeallocatingVectorIteratorState &operator=(const ConstDeallocatingVectorIteratorState &other)
{
index = other.index;
bucket_list = other.bucket_list;
return *this;
}
};
template <typename ElementT> struct DeallocatingVectorIteratorState template <typename ElementT> struct DeallocatingVectorIteratorState
{ {
DeallocatingVectorIteratorState() DeallocatingVectorIteratorState()
@ -62,6 +88,55 @@ template <typename ElementT> struct DeallocatingVectorIteratorState
} }
}; };
template <typename ElementT, std::size_t ELEMENTS_PER_BLOCK>
class ConstDeallocatingVectorIterator
: public boost::iterator_facade<ConstDeallocatingVectorIterator<ElementT, ELEMENTS_PER_BLOCK>,
ElementT,
std::random_access_iterator_tag>
{
ConstDeallocatingVectorIteratorState<ElementT> current_state;
public:
ConstDeallocatingVectorIterator() {}
ConstDeallocatingVectorIterator(std::size_t idx, const std::vector<ElementT *> *input_list)
: current_state(idx, input_list)
{
}
friend class boost::iterator_core_access;
void advance(std::size_t n) { current_state.index += n; }
void increment() { advance(1); }
void decrement() { advance(-1); }
bool equal(ConstDeallocatingVectorIterator const &other) const
{
return current_state.index == other.current_state.index;
}
std::ptrdiff_t distance_to(ConstDeallocatingVectorIterator const &other) const
{
// it is important to implement it 'other minus this'. otherwise sorting breaks
return other.current_state.index - current_state.index;
}
ElementT &dereference() const
{
const std::size_t current_bucket = current_state.index / ELEMENTS_PER_BLOCK;
const std::size_t current_index = current_state.index % ELEMENTS_PER_BLOCK;
return (current_state.bucket_list->at(current_bucket)[current_index]);
}
ElementT &operator[](const std::size_t index) const
{
const std::size_t current_bucket = (index + current_state.index) / ELEMENTS_PER_BLOCK;
const std::size_t current_index = (index + current_state.index) % ELEMENTS_PER_BLOCK;
return (current_state.bucket_list->at(current_bucket)[current_index]);
}
};
template <typename ElementT, std::size_t ELEMENTS_PER_BLOCK> template <typename ElementT, std::size_t ELEMENTS_PER_BLOCK>
class DeallocatingVectorIterator class DeallocatingVectorIterator
: public boost::iterator_facade<DeallocatingVectorIterator<ElementT, ELEMENTS_PER_BLOCK>, : public boost::iterator_facade<DeallocatingVectorIterator<ElementT, ELEMENTS_PER_BLOCK>,
@ -170,7 +245,7 @@ class DeallocatingVector
public: public:
using iterator = DeallocatingVectorIterator<ElementT, ELEMENTS_PER_BLOCK>; using iterator = DeallocatingVectorIterator<ElementT, ELEMENTS_PER_BLOCK>;
using const_iterator = DeallocatingVectorIterator<ElementT, ELEMENTS_PER_BLOCK>; using const_iterator = ConstDeallocatingVectorIterator<ElementT, ELEMENTS_PER_BLOCK>;
// this forward-only iterator deallocates all buckets that have been visited // this forward-only iterator deallocates all buckets that have been visited
using deallocation_iterator = DeallocatingVectorRemoveIterator<ElementT, ELEMENTS_PER_BLOCK>; using deallocation_iterator = DeallocatingVectorRemoveIterator<ElementT, ELEMENTS_PER_BLOCK>;

View File

@ -90,9 +90,11 @@ template <typename EdgeDataT> class DynamicGraph
*/ */
template <class ContainerT> DynamicGraph(const NodeIterator nodes, const ContainerT &graph) template <class ContainerT> DynamicGraph(const NodeIterator nodes, const ContainerT &graph)
{ {
// we need to cast here because DeallocatingVector does not have a valid const iterator
BOOST_ASSERT(std::is_sorted(const_cast<ContainerT&>(graph).begin(), const_cast<ContainerT&>(graph).end()));
number_of_nodes = nodes; number_of_nodes = nodes;
number_of_edges = static_cast<EdgeIterator>(graph.size()); number_of_edges = static_cast<EdgeIterator>(graph.size());
// node_array.reserve(number_of_nodes + 1);
node_array.resize(number_of_nodes + 1); node_array.resize(number_of_nodes + 1);
EdgeIterator edge = 0; EdgeIterator edge = 0;
EdgeIterator position = 0; EdgeIterator position = 0;
@ -117,6 +119,7 @@ template <typename EdgeDataT> class DynamicGraph
node_array[node].first_edge + node_array[node].edges)) node_array[node].first_edge + node_array[node].edges))
{ {
edge_list[i].target = graph[edge].target; edge_list[i].target = graph[edge].target;
BOOST_ASSERT(edge_list[i].target < number_of_nodes);
edge_list[i].data = graph[edge].data; edge_list[i].data = graph[edge].data;
++edge; ++edge;
} }
@ -136,7 +139,7 @@ template <typename EdgeDataT> class DynamicGraph
unsigned degree = 0; unsigned degree = 0;
for (const auto edge : osrm::irange(BeginEdges(n), EndEdges(n))) for (const auto edge : osrm::irange(BeginEdges(n), EndEdges(n)))
{ {
if (GetEdgeData(edge).forward) if (!GetEdgeData(edge).reversed)
{ {
++degree; ++degree;
} }

View File

@ -37,6 +37,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <limits> #include <limits>
/// This is what StaticRTree serialized and stores on disk
/// It is generated in EdgeBasedGraphFactory.
struct EdgeBasedNode struct EdgeBasedNode
{ {
EdgeBasedNode() EdgeBasedNode()

View File

@ -91,13 +91,24 @@ template <class CandidateLists> struct HiddenMarkovModel
: breakage(candidates_list.size()), candidates_list(candidates_list), : breakage(candidates_list.size()), candidates_list(candidates_list),
emission_log_probability(emission_log_probability) emission_log_probability(emission_log_probability)
{ {
for (const auto &l : candidates_list) viterbi.resize(candidates_list.size());
parents.resize(candidates_list.size());
path_lengths.resize(candidates_list.size());
suspicious.resize(candidates_list.size());
pruned.resize(candidates_list.size());
breakage.resize(candidates_list.size());
for (const auto i : osrm::irange<std::size_t>(0u, candidates_list.size()))
{ {
viterbi.emplace_back(l.size()); const auto& num_candidates = candidates_list[i].size();
parents.emplace_back(l.size()); // add empty vectors
path_lengths.emplace_back(l.size()); if (num_candidates > 0)
suspicious.emplace_back(l.size()); {
pruned.emplace_back(l.size()); viterbi[i].resize(num_candidates);
parents[i].resize(num_candidates);
path_lengths[i].resize(num_candidates);
suspicious[i].resize(num_candidates);
pruned[i].resize(num_candidates);
}
} }
clear(0); clear(0);
@ -121,10 +132,11 @@ template <class CandidateLists> struct HiddenMarkovModel
std::size_t initialize(std::size_t initial_timestamp) std::size_t initialize(std::size_t initial_timestamp)
{ {
BOOST_ASSERT(initial_timestamp < candidates_list.size()); auto num_points = candidates_list.size();
do do
{ {
BOOST_ASSERT(initial_timestamp < num_points);
for (const auto s : osrm::irange<std::size_t>(0u, viterbi[initial_timestamp].size())) for (const auto s : osrm::irange<std::size_t>(0u, viterbi[initial_timestamp].size()))
{ {
viterbi[initial_timestamp][s] = viterbi[initial_timestamp][s] =
@ -139,9 +151,9 @@ template <class CandidateLists> struct HiddenMarkovModel
} }
++initial_timestamp; ++initial_timestamp;
} while (breakage[initial_timestamp - 1]); } while (initial_timestamp < num_points && breakage[initial_timestamp - 1]);
if (initial_timestamp >= viterbi.size()) if (initial_timestamp >= num_points)
{ {
return osrm::matching::INVALID_STATE; return osrm::matching::INVALID_STATE;
} }

View File

@ -57,7 +57,7 @@ uint64_t HilbertCode::BitInterleaving(const uint32_t latitude, const uint32_t lo
void HilbertCode::TransposeCoordinate(uint32_t *X) const void HilbertCode::TransposeCoordinate(uint32_t *X) const
{ {
uint32_t M = 1 << (32 - 1), P, Q, t; uint32_t M = 1u << (32 - 1), P, Q, t;
int i; int i;
// Inverse undo // Inverse undo
for (Q = M; Q > 1; Q >>= 1) for (Q = M; Q > 1; Q >>= 1)

View File

@ -49,7 +49,7 @@ bool NodeBasedEdge::operator<(const NodeBasedEdge &other) const
NodeBasedEdge::NodeBasedEdge() NodeBasedEdge::NodeBasedEdge()
: source(SPECIAL_NODEID), target(SPECIAL_NODEID), name_id(0), weight(0), forward(false), : source(SPECIAL_NODEID), target(SPECIAL_NODEID), name_id(0), weight(0), forward(false),
backward(false), roundabout(false), in_tiny_cc(false), backward(false), roundabout(false),
access_restricted(false), is_split(false), travel_mode(false) access_restricted(false), is_split(false), travel_mode(false)
{ {
} }
@ -61,12 +61,11 @@ NodeBasedEdge::NodeBasedEdge(NodeID source,
bool forward, bool forward,
bool backward, bool backward,
bool roundabout, bool roundabout,
bool in_tiny_cc,
bool access_restricted, bool access_restricted,
TravelMode travel_mode, TravelMode travel_mode,
bool is_split) bool is_split)
: source(source), target(target), name_id(name_id), weight(weight), forward(forward), : source(source), target(target), name_id(name_id), weight(weight), forward(forward),
backward(backward), roundabout(roundabout), in_tiny_cc(in_tiny_cc), backward(backward), roundabout(roundabout),
access_restricted(access_restricted), is_split(is_split), travel_mode(travel_mode) access_restricted(access_restricted), is_split(is_split), travel_mode(travel_mode)
{ {
} }

View File

@ -43,7 +43,6 @@ struct NodeBasedEdge
bool forward, bool forward,
bool backward, bool backward,
bool roundabout, bool roundabout,
bool in_tiny_cc,
bool access_restricted, bool access_restricted,
TravelMode travel_mode, TravelMode travel_mode,
bool is_split); bool is_split);
@ -55,7 +54,6 @@ struct NodeBasedEdge
bool forward : 1; bool forward : 1;
bool backward : 1; bool backward : 1;
bool roundabout : 1; bool roundabout : 1;
bool in_tiny_cc : 1;
bool access_restricted : 1; bool access_restricted : 1;
bool is_split : 1; bool is_split : 1;
TravelMode travel_mode : 4; TravelMode travel_mode : 4;

View File

@ -0,0 +1,70 @@
/*
Copyright (c) 2015, Project OSRM contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MATRIX_GRAPH_WRAPPER_H
#define MATRIX_GRAPH_WRAPPER_H
#include <vector>
#include <cstddef>
#include <iterator>
#include "../typedefs.h"
// This Wrapper provides all methods that are needed for TarjanSCC, when the graph is given in a
// matrix representation (e.g. as output from a distance table call)
template <typename T> class MatrixGraphWrapper
{
public:
MatrixGraphWrapper(std::vector<T> table, const std::size_t number_of_nodes)
: table_(std::move(table)), number_of_nodes_(number_of_nodes){};
std::size_t GetNumberOfNodes() const { return number_of_nodes_; }
std::vector<T> GetAdjacentEdgeRange(const NodeID node) const
{
std::vector<T> edges;
// find all valid adjacent edges and move to vector `edges`
for (std::size_t i = 0; i < number_of_nodes_; ++i)
{
if (*(std::begin(table_) + node * number_of_nodes_ + i) != INVALID_EDGE_WEIGHT)
{
edges.push_back(i);
}
}
return edges;
}
EdgeWeight GetTarget(const EdgeWeight edge) const { return edge; }
private:
const std::vector<T> table_;
const std::size_t number_of_nodes_;
};
#endif // MATRIX_GRAPH_WRAPPER_H

View File

@ -30,7 +30,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "dynamic_graph.hpp" #include "dynamic_graph.hpp"
#include "import_edge.hpp" #include "import_edge.hpp"
#include "../util/simple_logger.hpp" #include "../util/graph_utils.hpp"
#include <tbb/parallel_sort.h> #include <tbb/parallel_sort.h>
@ -39,196 +39,62 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
struct NodeBasedEdgeData struct NodeBasedEdgeData
{ {
NodeBasedEdgeData() NodeBasedEdgeData()
: distance(INVALID_EDGE_WEIGHT), edgeBasedNodeID(SPECIAL_NODEID), : distance(INVALID_EDGE_WEIGHT), edge_id(SPECIAL_NODEID),
nameID(std::numeric_limits<unsigned>::max()), isAccessRestricted(false), shortcut(false), name_id(std::numeric_limits<unsigned>::max()), access_restricted(false),
forward(false), backward(false), roundabout(false), ignore_in_grid(false), reversed(false), roundabout(false), travel_mode(TRAVEL_MODE_INACCESSIBLE)
travel_mode(TRAVEL_MODE_INACCESSIBLE) {
}
NodeBasedEdgeData(int distance, unsigned edge_id, unsigned name_id,
bool access_restricted, bool reversed,
bool roundabout, TravelMode travel_mode)
: distance(distance), edge_id(edge_id), name_id(name_id),
access_restricted(access_restricted), reversed(reversed),
roundabout(roundabout), travel_mode(travel_mode)
{ {
} }
int distance; int distance;
unsigned edgeBasedNodeID; unsigned edge_id;
unsigned nameID; unsigned name_id;
bool isAccessRestricted : 1; bool access_restricted : 1;
bool shortcut : 1; bool reversed : 1;
bool forward : 1;
bool backward : 1;
bool roundabout : 1; bool roundabout : 1;
bool ignore_in_grid : 1;
TravelMode travel_mode : 4; TravelMode travel_mode : 4;
void SwapDirectionFlags()
{
bool temp_flag = forward;
forward = backward;
backward = temp_flag;
}
bool IsCompatibleTo(const NodeBasedEdgeData &other) const bool IsCompatibleTo(const NodeBasedEdgeData &other) const
{ {
return (forward == other.forward) && (backward == other.backward) && return (reversed == other.reversed) && (name_id == other.name_id) &&
(nameID == other.nameID) && (ignore_in_grid == other.ignore_in_grid) &&
(travel_mode == other.travel_mode); (travel_mode == other.travel_mode);
} }
}; };
using NodeBasedDynamicGraph = DynamicGraph<NodeBasedEdgeData>; using NodeBasedDynamicGraph = DynamicGraph<NodeBasedEdgeData>;
inline bool validateNeighborHood(const NodeBasedDynamicGraph& graph, const NodeID source) /// Factory method to create NodeBasedDynamicGraph from NodeBasedEdges
{ /// The since DynamicGraph expects directed edges, we need to insert
for (auto edge = graph.BeginEdges(source); edge < graph.EndEdges(source); ++edge) /// two edges for undirected edges.
{
const auto& data = graph.GetEdgeData(edge);
if (!data.forward && !data.backward)
{
SimpleLogger().Write(logWARNING) << "Invalid edge directions";
return false;
}
auto target = graph.GetTarget(edge);
if (target == SPECIAL_NODEID)
{
SimpleLogger().Write(logWARNING) << "Invalid edge target";
return false;
}
bool found_reverse = false;
for (auto rev_edge = graph.BeginEdges(target); rev_edge < graph.EndEdges(target); ++rev_edge)
{
auto rev_target = graph.GetTarget(rev_edge);
if (rev_target == SPECIAL_NODEID)
{
SimpleLogger().Write(logWARNING) << "Invalid reverse edge target";
return false;
}
if (rev_target != source)
{
continue;
}
if (found_reverse)
{
SimpleLogger().Write(logWARNING) << "Found more than one reverse edge";
return false;
}
const auto& rev_data = graph.GetEdgeData(rev_edge);
// edge is incoming, this must be an outgoing edge
if (data.backward && !rev_data.forward)
{
SimpleLogger().Write(logWARNING) << "Found no outgoing edge to an incoming edge!";
return false;
}
// edge is bi-directional, reverse must be as well
if (data.forward && data.backward && (!rev_data.forward || !rev_data.backward))
{
SimpleLogger().Write(logWARNING) << "Found bi-directional edge that is not bi-directional to both ends";
return false;
}
found_reverse = true;
}
if (!found_reverse)
{
SimpleLogger().Write(logWARNING) << "Could not find reverse edge";
return false;
}
}
return true;
}
// This function checks if the overal graph is undirected (has an edge in each direction).
inline bool validateNodeBasedGraph(const NodeBasedDynamicGraph& graph)
{
for (auto source = 0u; source < graph.GetNumberOfNodes(); ++source)
{
if (!validateNeighborHood(graph, source))
{
return false;
}
}
return true;
}
// Factory method to create NodeBasedDynamicGraph from NodeBasedEdges
// The since DynamicGraph expects directed edges, we need to insert
// two edges for undirected edges.
inline std::shared_ptr<NodeBasedDynamicGraph> inline std::shared_ptr<NodeBasedDynamicGraph>
NodeBasedDynamicGraphFromImportEdges(int number_of_nodes, std::vector<NodeBasedEdge> &input_edge_list) NodeBasedDynamicGraphFromEdges(int number_of_nodes, const std::vector<NodeBasedEdge> &input_edge_list)
{ {
static_assert(sizeof(NodeBasedEdgeData) == 16, auto edges_list = directedEdgesFromCompressed<NodeBasedDynamicGraph::InputEdge>(input_edge_list,
"changing node based edge data size changes memory consumption"); [](NodeBasedDynamicGraph::InputEdge& output_edge, const NodeBasedEdge& input_edge)
DeallocatingVector<NodeBasedDynamicGraph::InputEdge> edges_list;
NodeBasedDynamicGraph::InputEdge edge;
// Since DynamicGraph assumes directed edges we have to make sure we transformed
// the compressed edge format into single directed edges. We do this to make sure
// every node also knows its incoming edges, not only its outgoing edges and use the backward=true
// flag to indicate which is which.
//
// We do the transformation in the following way:
//
// if the edge (a, b) is split:
// 1. this edge must be in only one direction, so its a --> b
// 2. there must be another directed edge b --> a somewhere in the data
// if the edge (a, b) is not split:
// 1. this edge must be one of a --> b or a <-> b
//    (a <-- b gets reduced to b --> a)
// 2. a --> b will be transformed to a --> b and b <-- a
// 3. a <-> b will be transformed to a <-> b and b <-> a (I think a --> b and b <-- a would work as well though)
for (const NodeBasedEdge &import_edge : input_edge_list)
{ {
// edges that are not forward get converted by flipping the end points output_edge.data.distance = static_cast<int>(input_edge.weight);
BOOST_ASSERT(import_edge.forward); BOOST_ASSERT(output_edge.data.distance > 0);
if (import_edge.forward) output_edge.data.roundabout = input_edge.roundabout;
{ output_edge.data.name_id = input_edge.name_id;
edge.source = import_edge.source; output_edge.data.access_restricted = input_edge.access_restricted;
edge.target = import_edge.target; output_edge.data.travel_mode = input_edge.travel_mode;
edge.data.forward = import_edge.forward;
edge.data.backward = import_edge.backward;
}
BOOST_ASSERT(edge.source != edge.target);
edge.data.distance = static_cast<int>(import_edge.weight);
BOOST_ASSERT(edge.data.distance > 0);
edge.data.shortcut = false;
edge.data.roundabout = import_edge.roundabout;
edge.data.ignore_in_grid = import_edge.in_tiny_cc;
edge.data.nameID = import_edge.name_id;
edge.data.isAccessRestricted = import_edge.access_restricted;
edge.data.travel_mode = import_edge.travel_mode;
edges_list.push_back(edge);
if (!import_edge.is_split)
{
using std::swap; // enable ADL
swap(edge.source, edge.target);
edge.data.SwapDirectionFlags();
edges_list.push_back(edge);
}
} }
);
tbb::parallel_sort(edges_list.begin(), edges_list.end()); tbb::parallel_sort(edges_list.begin(), edges_list.end());
auto graph = std::make_shared<NodeBasedDynamicGraph>( auto graph = std::make_shared<NodeBasedDynamicGraph>(
static_cast<NodeBasedDynamicGraph::NodeIterator>(number_of_nodes), edges_list); static_cast<NodeBasedDynamicGraph::NodeIterator>(number_of_nodes), edges_list);
#ifndef NDEBUG
BOOST_ASSERT(validateNodeBasedGraph(*graph));
#endif
return graph; return graph;
} }

View File

@ -88,8 +88,10 @@ struct PhantomNode
unsigned component_id; unsigned component_id;
FixedPointCoordinate location; FixedPointCoordinate location;
unsigned short fwd_segment_position; unsigned short fwd_segment_position;
TravelMode forward_travel_mode : 4; // note 4 bits would suffice for each,
TravelMode backward_travel_mode : 4; // but the saved byte would be padding anyway
TravelMode forward_travel_mode;
TravelMode backward_travel_mode;
int GetForwardWeightPlusOffset() const; int GetForwardWeightPlusOffset() const;
@ -108,6 +110,8 @@ struct PhantomNode
bool operator==(const PhantomNode &other) const; bool operator==(const PhantomNode &other) const;
}; };
static_assert(sizeof(PhantomNode) == 48, "PhantomNode has more padding then expected");
using PhantomNodeArray = std::vector<std::vector<PhantomNode>>; using PhantomNodeArray = std::vector<std::vector<PhantomNode>>;
class phantom_node_pair : public std::pair<PhantomNode, PhantomNode> class phantom_node_pair : public std::pair<PhantomNode, PhantomNode>

View File

@ -58,7 +58,7 @@ struct QueryEdge
QueryEdge() : source(SPECIAL_NODEID), target(SPECIAL_NODEID) {} QueryEdge() : source(SPECIAL_NODEID), target(SPECIAL_NODEID) {}
QueryEdge(NodeID source, NodeID target, EdgeData data) QueryEdge(NodeID source, NodeID target, EdgeData data)
: source(source), target(target), data(data) : source(source), target(target), data(std::move(data))
{ {
} }

View File

@ -0,0 +1,178 @@
/*
Copyright (c) 2015, Project OSRM contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "raster_source.hpp"

#include "../util/simple_logger.hpp"
#include "../util/timing_util.hpp"

#include <osrm/coordinate.hpp>

#include <cmath>
#include <utility>
// Constructs a raster source from an in-memory grid plus its geographic
// bounds (fixed-point coordinates) and grid dimensions in cells.
// xstep/ystep are the per-cell sizes along each axis, derived from the
// bounds and the number of sample points.
RasterSource::RasterSource(RasterGrid _raster_data,
                           std::size_t _width,
                           std::size_t _height,
                           int _xmin,
                           int _xmax,
                           int _ymin,
                           int _ymax)
    // The grid is taken by value; move it into place instead of copying —
    // RasterGrid owns a potentially large std::vector, and the only caller
    // (loadRasterSource) already passes an rvalue.
    : xstep(calcSize(_xmin, _xmax, _width)), ystep(calcSize(_ymin, _ymax, _height)),
      raster_data(std::move(_raster_data)), width(_width), height(_height), xmin(_xmin),
      xmax(_xmax), ymin(_ymin), ymax(_ymax)
{
    BOOST_ASSERT(xstep != 0);
    BOOST_ASSERT(ystep != 0);
}
// Cell size along one axis: the bounds span divided by the (count - 1)
// intervals between the count sample points.
float RasterSource::calcSize(int min, int max, std::size_t count) const
{
    BOOST_ASSERT(count > 0);
    const float span = static_cast<float>(max - min);
    const float intervals = static_cast<float>(count) - 1;
    return span / intervals;
}
// Query raster source for the nearest data point; out-of-bounds queries
// yield the invalid datum.
RasterDatum RasterSource::getRasterData(const int lon, const int lat) const
{
    const bool out_of_bounds = lon < xmin || lon > xmax || lat < ymin || lat > ymax;
    if (out_of_bounds)
    {
        return {};
    }

    // Snap the query coordinate to the nearest grid cell in each dimension.
    const auto column = static_cast<std::size_t>(round((lon - xmin) / xstep));
    const auto row = static_cast<std::size_t>(round((ymax - lat) / ystep));

    return {raster_data(column, row)};
}
// Query raster source using bilinear interpolation over the four grid
// cells surrounding the query point; out-of-bounds queries yield the
// invalid datum.
RasterDatum RasterSource::getRasterInterpolate(const int lon, const int lat) const
{
    if (lon < xmin || lon > xmax || lat < ymin || lat > ymax)
    {
        return {};
    }

    // Fractional cell position of the query point within the grid.
    const auto xthP = (lon - xmin) / xstep;
    const auto ythP = (ymax - lat) / ystep;
    // Indices of the surrounding grid corners, clamped to the grid.
    const std::size_t top = static_cast<std::size_t>(fmax(floor(ythP), 0));
    const std::size_t bottom = static_cast<std::size_t>(fmin(ceil(ythP), height - 1));
    const std::size_t left = static_cast<std::size_t>(fmax(floor(xthP), 0));
    const std::size_t right = static_cast<std::size_t>(fmin(ceil(xthP), width - 1));

    // Distances from the top-left corner for bilinear interpolation.
    // BUGFIX: the previous expression `(lon - left * xstep + xmin) / xstep`
    // ADDED xmin instead of subtracting it (missing parentheses around
    // `left * xstep + xmin`), skewing the horizontal weights. The fractional
    // offset is simply the cell position minus the corner index, matching
    // how fromTop was already computed.
    const float fromLeft = xthP - left;
    const float fromTop = ythP - top;
    const float fromRight = 1 - fromLeft;
    const float fromBottom = 1 - fromTop;

    // Corner values weighted by the opposite distances.
    return {static_cast<std::int32_t>(raster_data(left, top) * (fromRight * fromBottom) +
                                      raster_data(right, top) * (fromLeft * fromBottom) +
                                      raster_data(left, bottom) * (fromRight * fromTop) +
                                      raster_data(right, bottom) * (fromLeft * fromTop))};
}
// Load raster source into memory.
// Registers the file at path_string as a raster source with the given
// geographic bounds (degrees) and grid dimensions, returning its integer
// source_id. Loading the same path twice returns the previously assigned
// id without re-reading the file. Throws osrm::exception when the path
// does not exist or the file cannot be parsed.
int SourceContainer::loadRasterSource(const std::string &path_string,
                                      double xmin,
                                      double xmax,
                                      double ymin,
                                      double ymax,
                                      std::size_t nrows,
                                      std::size_t ncols)
{
    // Convert degree bounds to OSRM's fixed-point coordinate representation.
    const auto _xmin = static_cast<int>(xmin * COORDINATE_PRECISION);
    const auto _xmax = static_cast<int>(xmax * COORDINATE_PRECISION);
    const auto _ymin = static_cast<int>(ymin * COORDINATE_PRECISION);
    const auto _ymax = static_cast<int>(ymax * COORDINATE_PRECISION);

    // Cache hit: this path was loaded before, reuse its id.
    const auto itr = LoadedSourcePaths.find(path_string);
    if (itr != LoadedSourcePaths.end())
    {
        SimpleLogger().Write() << "[source loader] Already loaded source '" << path_string
                               << "' at source_id " << itr->second;
        return itr->second;
    }

    // New sources get sequential ids (the index into LoadedSources).
    int source_id = static_cast<int>(LoadedSources.size());

    SimpleLogger().Write() << "[source loader] Loading from " << path_string << " ... ";
    TIMER_START(loading_source);

    boost::filesystem::path filepath(path_string);
    if (!boost::filesystem::exists(filepath))
    {
        throw osrm::exception("error reading: no such path");
    }

    // File reading and parsing happen in the RasterGrid constructor, which
    // may throw; in that case neither container below has been modified.
    RasterGrid rasterData{filepath, ncols, nrows};
    RasterSource source{std::move(rasterData), ncols, nrows, _xmin, _xmax, _ymin, _ymax};
    TIMER_STOP(loading_source);

    LoadedSourcePaths.emplace(path_string, source_id);
    LoadedSources.push_back(std::move(source));

    SimpleLogger().Write() << "[source loader] ok, after " << TIMER_SEC(loading_source) << "s";

    return source_id;
}
// External function for looking up the nearest data point from a specified
// source. Throws osrm::exception for an unknown source_id; asserts the
// coordinate is a valid fixed-point lon/lat.
RasterDatum SourceContainer::getRasterDataFromSource(unsigned int source_id, int lon, int lat)
{
    // Range-check the id directly; the previous `size() < source_id + 1`
    // form wraps around to 0 when source_id == UINT_MAX and would then
    // index out of bounds instead of throwing.
    if (source_id >= LoadedSources.size())
    {
        throw osrm::exception("error reading: no such loaded source");
    }

    BOOST_ASSERT(lat < (90 * COORDINATE_PRECISION));
    BOOST_ASSERT(lat > (-90 * COORDINATE_PRECISION));
    BOOST_ASSERT(lon < (180 * COORDINATE_PRECISION));
    BOOST_ASSERT(lon > (-180 * COORDINATE_PRECISION));

    const auto &found = LoadedSources[source_id];
    return found.getRasterData(lon, lat);
}
// External function for looking up bilinearly interpolated data from a
// specified source. Throws osrm::exception for an unknown source_id;
// asserts the coordinate is a valid fixed-point lon/lat.
RasterDatum
SourceContainer::getRasterInterpolateFromSource(unsigned int source_id, int lon, int lat)
{
    // Range-check the id directly; the previous `size() < source_id + 1`
    // form wraps around to 0 when source_id == UINT_MAX and would then
    // index out of bounds instead of throwing.
    if (source_id >= LoadedSources.size())
    {
        throw osrm::exception("error reading: no such loaded source");
    }

    BOOST_ASSERT(lat < (90 * COORDINATE_PRECISION));
    BOOST_ASSERT(lat > (-90 * COORDINATE_PRECISION));
    BOOST_ASSERT(lon < (180 * COORDINATE_PRECISION));
    BOOST_ASSERT(lon > (-180 * COORDINATE_PRECISION));

    const auto &found = LoadedSources[source_id];
    return found.getRasterInterpolate(lon, lat);
}

View File

@ -0,0 +1,175 @@
/*
Copyright (c) 2015, Project OSRM contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef RASTER_SOURCE_HPP
#define RASTER_SOURCE_HPP
#include "../util/osrm_exception.hpp"
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/spirit/include/qi_int.hpp>
#include <boost/spirit/include/qi.hpp>
#include <boost/algorithm/string/trim.hpp>
#include <boost/assert.hpp>
#include <unordered_map>
#include <iterator>
/**
\brief Small wrapper around raster source queries to optionally provide results
gracefully, depending on source bounds
*/
struct RasterDatum
{
    // Sentinel for "no data": returned for out-of-bounds queries.
    static std::int32_t get_invalid() { return std::numeric_limits<std::int32_t>::max(); }

    std::int32_t datum = get_invalid();

    RasterDatum() = default;

    // Implicit on purpose: lets lookup functions return a raw grid value
    // directly via brace-initialization.
    RasterDatum(std::int32_t _datum) : datum(_datum) {}
};
// Owns the parsed contents of one raster file as a row-major grid of
// 32-bit integers (_xdim columns by _ydim rows).
class RasterGrid
{
  public:
    // Reads the whole file at filepath and parses it as whitespace-separated
    // integers. Throws osrm::exception when the file cannot be opened or
    // when parsing fails or leaves unconsumed input.
    RasterGrid(const boost::filesystem::path &filepath, std::size_t _xdim, std::size_t _ydim)
    {
        xdim = _xdim;
        ydim = _ydim;
        _data.reserve(ydim * xdim);

        boost::filesystem::ifstream stream(filepath);
        if (!stream)
        {
            throw osrm::exception("Unable to open raster file.");
        }

        // Slurp the entire file into a string buffer sized via seek/tell.
        stream.seekg(0, std::ios_base::end);
        std::string buffer;
        buffer.resize(static_cast<std::size_t>(stream.tellg()));

        stream.seekg(0, std::ios_base::beg);

        BOOST_ASSERT(buffer.size() > 1);
        stream.read(&buffer[0], static_cast<std::streamsize>(buffer.size()));
        boost::algorithm::trim(buffer);

        auto itr = buffer.begin();
        auto end = buffer.end();

        // Parse runs of integers separated by runs of whitespace straight
        // into _data using Boost.Spirit.Qi.
        bool r = false;
        try
        {
            r = boost::spirit::qi::parse(
                itr, end, +boost::spirit::qi::int_ % +boost::spirit::qi::space, _data);
        }
        catch (std::exception const &ex)
        {
            throw osrm::exception(
                std::string("Failed to read from raster source with exception: ") + ex.what());
        }

        // Parsing must both succeed and consume the whole buffer.
        if (!r || itr != end)
        {
            throw osrm::exception("Failed to parse raster source correctly.");
        }
    }

    RasterGrid(const RasterGrid &) = default;
    RasterGrid &operator=(const RasterGrid &) = default;

    RasterGrid(RasterGrid &&) = default;
    RasterGrid &operator=(RasterGrid &&) = default;

    // Cell accessors; (x, y) = (column, row) into the row-major storage.
    // NOTE(review): no bounds checking is performed — callers are expected
    // to clamp indices to [0, xdim) x [0, ydim).
    std::int32_t operator()(std::size_t x, std::size_t y) { return _data[y * xdim + x]; }
    std::int32_t operator()(std::size_t x, std::size_t y) const { return _data[(y)*xdim + (x)]; }

  private:
    std::vector<std::int32_t> _data;
    std::size_t xdim, ydim;
};
/**
\brief Stores raster source data in memory and provides lookup functions.
*/
class RasterSource
{
  private:
    // Size of one grid cell along each axis in fixed-point coordinate
    // units; computed from the bounds and cell counts at construction.
    const float xstep;
    const float ystep;

    // Span (max - min) divided by the (count - 1) intervals between the
    // count sample points.
    float calcSize(int min, int max, std::size_t count) const;

  public:
    RasterGrid raster_data;

    // Grid dimensions in cells.
    const std::size_t width;
    const std::size_t height;
    // Geographic bounds in fixed-point coordinates.
    const int xmin;
    const int xmax;
    const int ymin;
    const int ymax;

    // Nearest-neighbour lookup; yields the invalid datum outside bounds.
    RasterDatum getRasterData(const int lon, const int lat) const;

    // Bilinearly interpolated lookup; yields the invalid datum outside bounds.
    RasterDatum getRasterInterpolate(const int lon, const int lat) const;

    RasterSource(RasterGrid _raster_data,
                 std::size_t width,
                 std::size_t height,
                 int _xmin,
                 int _xmax,
                 int _ymin,
                 int _ymax);
};
// Registry of loaded raster sources, addressed by integer source_id;
// deduplicates loads by file path.
class SourceContainer
{
  public:
    SourceContainer() = default;

    // Loads (or reuses) the raster file at path_string and returns its
    // source_id. Bounds are in degrees; nrows/ncols are grid dimensions.
    int loadRasterSource(const std::string &path_string,
                         double xmin,
                         double xmax,
                         double ymin,
                         double ymax,
                         std::size_t nrows,
                         std::size_t ncols);

    // Lookup helpers; both throw osrm::exception for an unknown source_id.
    RasterDatum getRasterDataFromSource(unsigned int source_id, int lon, int lat);
    RasterDatum getRasterInterpolateFromSource(unsigned int source_id, int lon, int lat);

  private:
    std::vector<RasterSource> LoadedSources;
    std::unordered_map<std::string, int> LoadedSourcePaths;
};
#endif /* RASTER_SOURCE_HPP */

View File

@ -28,7 +28,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef RECTANGLE_HPP #ifndef RECTANGLE_HPP
#define RECTANGLE_HPP #define RECTANGLE_HPP
#include "coordinate_calculation.hpp" #include "../algorithms/coordinate_calculation.hpp"
#include <boost/assert.hpp> #include <boost/assert.hpp>

View File

@ -156,7 +156,7 @@ class RestrictionMap
bool bool
CheckIfTurnIsRestricted(const NodeID node_u, const NodeID node_v, const NodeID node_w) const; CheckIfTurnIsRestricted(const NodeID node_u, const NodeID node_v, const NodeID node_w) const;
std::size_t size() { return m_count; } std::size_t size() const { return m_count; }
private: private:
// check of node is the start of any restriction // check of node is the start of any restriction

View File

@ -36,7 +36,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
RouteParameters::RouteParameters() RouteParameters::RouteParameters()
: zoom_level(18), print_instructions(false), alternate_route(true), geometry(true), : zoom_level(18), print_instructions(false), alternate_route(true), geometry(true),
compression(true), deprecatedAPI(false), uturn_default(false), classify(false), compression(true), deprecatedAPI(false), uturn_default(false), classify(false),
matching_beta(-1.0), gps_precision(-1.0), check_sum(-1), num_results(1) matching_beta(5), gps_precision(5), check_sum(-1), num_results(1)
{ {
} }
@ -134,7 +134,7 @@ void RouteParameters::addCoordinate(
static_cast<int>(COORDINATE_PRECISION * boost::fusion::at_c<1>(received_coordinates))); static_cast<int>(COORDINATE_PRECISION * boost::fusion::at_c<1>(received_coordinates)));
} }
void RouteParameters::getCoordinatesFromGeometry(const std::string geometry_string) void RouteParameters::getCoordinatesFromGeometry(const std::string &geometry_string)
{ {
PolylineCompressor pc; PolylineCompressor pc;
coordinates = pc.decode_string(geometry_string); coordinates = pc.decode_string(geometry_string);

View File

@ -33,6 +33,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../routing_algorithms/many_to_many.hpp" #include "../routing_algorithms/many_to_many.hpp"
#include "../routing_algorithms/map_matching.hpp" #include "../routing_algorithms/map_matching.hpp"
#include "../routing_algorithms/shortest_path.hpp" #include "../routing_algorithms/shortest_path.hpp"
#include "../routing_algorithms/direct_shortest_path.hpp"
#include <type_traits> #include <type_traits>
@ -44,6 +45,7 @@ template <class DataFacadeT> class SearchEngine
public: public:
ShortestPathRouting<DataFacadeT> shortest_path; ShortestPathRouting<DataFacadeT> shortest_path;
DirectShortestPathRouting<DataFacadeT> direct_shortest_path;
AlternativeRouting<DataFacadeT> alternative_path; AlternativeRouting<DataFacadeT> alternative_path;
ManyToManyRouting<DataFacadeT> distance_table; ManyToManyRouting<DataFacadeT> distance_table;
MapMatching<DataFacadeT> map_matching; MapMatching<DataFacadeT> map_matching;
@ -51,6 +53,7 @@ template <class DataFacadeT> class SearchEngine
explicit SearchEngine(DataFacadeT *facade) explicit SearchEngine(DataFacadeT *facade)
: facade(facade), : facade(facade),
shortest_path(facade, engine_working_data), shortest_path(facade, engine_working_data),
direct_shortest_path(facade, engine_working_data),
alternative_path(facade, engine_working_data), alternative_path(facade, engine_working_data),
distance_table(facade, engine_working_data), distance_table(facade, engine_working_data),
map_matching(facade, engine_working_data) map_matching(facade, engine_working_data)

View File

@ -34,6 +34,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../typedefs.h" #include "../typedefs.h"
#include <osrm/coordinate.hpp> #include <osrm/coordinate.hpp>
#include <utility>
// Struct fits everything in one cache line // Struct fits everything in one cache line
struct SegmentInformation struct SegmentInformation
@ -48,7 +49,7 @@ struct SegmentInformation
bool necessary; bool necessary;
bool is_via_location; bool is_via_location;
explicit SegmentInformation(const FixedPointCoordinate &location, explicit SegmentInformation(FixedPointCoordinate location,
const NodeID name_id, const NodeID name_id,
const EdgeWeight duration, const EdgeWeight duration,
const float length, const float length,
@ -56,20 +57,20 @@ struct SegmentInformation
const bool necessary, const bool necessary,
const bool is_via_location, const bool is_via_location,
const TravelMode travel_mode) const TravelMode travel_mode)
: location(location), name_id(name_id), duration(duration), length(length), bearing(0), : location(std::move(location)), name_id(name_id), duration(duration), length(length),
turn_instruction(turn_instruction), travel_mode(travel_mode), necessary(necessary), bearing(0), turn_instruction(turn_instruction), travel_mode(travel_mode),
is_via_location(is_via_location) necessary(necessary), is_via_location(is_via_location)
{ {
} }
explicit SegmentInformation(const FixedPointCoordinate &location, explicit SegmentInformation(FixedPointCoordinate location,
const NodeID name_id, const NodeID name_id,
const EdgeWeight duration, const EdgeWeight duration,
const float length, const float length,
const TurnInstruction turn_instruction, const TurnInstruction turn_instruction,
const TravelMode travel_mode) const TravelMode travel_mode)
: location(location), name_id(name_id), duration(duration), length(length), bearing(0), : location(std::move(location)), name_id(name_id), duration(duration), length(length),
turn_instruction(turn_instruction), travel_mode(travel_mode), bearing(0), turn_instruction(turn_instruction), travel_mode(travel_mode),
necessary(turn_instruction != TurnInstruction::NoTurn), is_via_location(false) necessary(turn_instruction != TurnInstruction::NoTurn), is_via_location(false)
{ {
} }

View File

@ -126,7 +126,7 @@ class SharedMemory
shm = boost::interprocess::xsi_shared_memory(boost::interprocess::open_or_create, key, shm = boost::interprocess::xsi_shared_memory(boost::interprocess::open_or_create, key,
size); size);
#ifdef __linux__ #ifdef __linux__
if (-1 == shmctl(shm.get_shmid(), SHM_LOCK, 0)) if (-1 == shmctl(shm.get_shmid(), SHM_LOCK, nullptr))
{ {
if (ENOMEM == errno) if (ENOMEM == errno)
{ {

View File

@ -87,8 +87,11 @@ template <typename EdgeDataT, bool UseSharedMemory = false> class StaticGraph
return osrm::irange(BeginEdges(node), EndEdges(node)); return osrm::irange(BeginEdges(node), EndEdges(node));
} }
StaticGraph(const int nodes, std::vector<InputEdge> &graph) template<typename ContainerT>
StaticGraph(const int nodes, const ContainerT &graph)
{ {
BOOST_ASSERT(std::is_sorted(const_cast<ContainerT&>(graph).begin(), const_cast<ContainerT&>(graph).end()));
number_of_nodes = nodes; number_of_nodes = nodes;
number_of_edges = static_cast<EdgeIterator>(graph.size()); number_of_edges = static_cast<EdgeIterator>(graph.size());
node_array.resize(number_of_nodes + 1); node_array.resize(number_of_nodes + 1);

View File

@ -316,8 +316,8 @@ class StaticRTree
using IncrementalQueryNodeType = mapbox::util::variant<TreeNode, EdgeDataT>; using IncrementalQueryNodeType = mapbox::util::variant<TreeNode, EdgeDataT>;
struct IncrementalQueryCandidate struct IncrementalQueryCandidate
{ {
explicit IncrementalQueryCandidate(const float dist, const IncrementalQueryNodeType &node) explicit IncrementalQueryCandidate(const float dist, IncrementalQueryNodeType node)
: min_dist(dist), node(node) : min_dist(dist), node(std::move(node))
{ {
} }
@ -345,8 +345,8 @@ class StaticRTree
// Construct a packed Hilbert-R-Tree with Kamel-Faloutsos algorithm [1] // Construct a packed Hilbert-R-Tree with Kamel-Faloutsos algorithm [1]
explicit StaticRTree(const std::vector<EdgeDataT> &input_data_vector, explicit StaticRTree(const std::vector<EdgeDataT> &input_data_vector,
const std::string tree_node_filename, const std::string &tree_node_filename,
const std::string leaf_node_filename, const std::string &leaf_node_filename,
const std::vector<QueryNode> &coordinate_list) const std::vector<QueryNode> &coordinate_list)
: m_element_count(input_data_vector.size()), m_leaf_node_filename(leaf_node_filename) : m_element_count(input_data_vector.size()), m_leaf_node_filename(leaf_node_filename)
{ {
@ -365,8 +365,8 @@ class StaticRTree
[&input_data_vector, &input_wrapper_vector, &get_hilbert_number, &coordinate_list]( [&input_data_vector, &input_wrapper_vector, &get_hilbert_number, &coordinate_list](
const tbb::blocked_range<uint64_t> &range) const tbb::blocked_range<uint64_t> &range)
{ {
for (uint64_t element_counter = range.begin(); element_counter != range.end(); for (uint64_t element_counter = range.begin(), end = range.end();
++element_counter) element_counter != end; ++element_counter)
{ {
WrappedInputElement &current_wrapper = input_wrapper_vector[element_counter]; WrappedInputElement &current_wrapper = input_wrapper_vector[element_counter];
current_wrapper.m_array_index = element_counter; current_wrapper.m_array_index = element_counter;
@ -476,7 +476,7 @@ class StaticRTree
tbb::parallel_for(tbb::blocked_range<uint32_t>(0, search_tree_size), tbb::parallel_for(tbb::blocked_range<uint32_t>(0, search_tree_size),
[this, &search_tree_size](const tbb::blocked_range<uint32_t> &range) [this, &search_tree_size](const tbb::blocked_range<uint32_t> &range)
{ {
for (uint32_t i = range.begin(); i != range.end(); ++i) for (uint32_t i = range.begin(), end = range.end(); i != end; ++i)
{ {
TreeNode &current_tree_node = this->m_search_tree[i]; TreeNode &current_tree_node = this->m_search_tree[i];
for (uint32_t j = 0; j < current_tree_node.child_count; ++j) for (uint32_t j = 0; j < current_tree_node.child_count; ++j)
@ -553,7 +553,7 @@ class StaticRTree
const boost::filesystem::path &leaf_file, const boost::filesystem::path &leaf_file,
std::shared_ptr<CoordinateListT> coordinate_list) std::shared_ptr<CoordinateListT> coordinate_list)
: m_search_tree(tree_node_ptr, number_of_nodes), m_leaf_node_filename(leaf_file.string()), : m_search_tree(tree_node_ptr, number_of_nodes), m_leaf_node_filename(leaf_file.string()),
m_coordinate_list(coordinate_list) m_coordinate_list(std::move(coordinate_list))
{ {
// open leaf node file and store thread specific pointer // open leaf node file and store thread specific pointer
if (!boost::filesystem::exists(leaf_file)) if (!boost::filesystem::exists(leaf_file))
@ -664,7 +664,8 @@ class StaticRTree
while (!traversal_queue.empty()) while (!traversal_queue.empty())
{ {
const IncrementalQueryCandidate current_query_node = traversal_queue.top(); const IncrementalQueryCandidate current_query_node = traversal_queue.top();
if (current_query_node.min_dist > max_distance && inspected_elements > max_checked_elements) if (current_query_node.min_dist > max_distance &&
inspected_elements > max_checked_elements)
{ {
break; break;
} }
@ -783,32 +784,18 @@ class StaticRTree
// Returns elements within max_distance. // Returns elements within max_distance.
// If the minium of elements could not be found in the search radius, widen // If the minium of elements could not be found in the search radius, widen
// it until the minimum can be satisfied. // it until the minimum can be satisfied.
// Also, the number of returned nodes is capped at the given maximum.
bool IncrementalFindPhantomNodeForCoordinateWithDistance( bool IncrementalFindPhantomNodeForCoordinateWithDistance(
const FixedPointCoordinate &input_coordinate, const FixedPointCoordinate &input_coordinate,
std::vector<std::pair<PhantomNode, double>> &result_phantom_node_vector, std::vector<std::pair<PhantomNode, double>> &result_phantom_node_vector,
const double max_distance, const double max_distance,
const unsigned min_number_of_phantom_nodes,
const unsigned max_number_of_phantom_nodes,
const unsigned max_checked_elements = 4 * LEAF_NODE_SIZE) const unsigned max_checked_elements = 4 * LEAF_NODE_SIZE)
{ {
unsigned inspected_elements = 0; unsigned inspected_elements = 0;
unsigned number_of_elements_from_big_cc = 0;
unsigned number_of_elements_from_tiny_cc = 0;
// is true if a big cc was added to the queue to we also have a lower bound
// for them. it actives pruning for big components
bool has_big_cc = false;
unsigned pruned_elements = 0;
std::pair<double, double> projected_coordinate = { std::pair<double, double> projected_coordinate = {
mercator::lat2y(input_coordinate.lat / COORDINATE_PRECISION), mercator::lat2y(input_coordinate.lat / COORDINATE_PRECISION),
input_coordinate.lon / COORDINATE_PRECISION}; input_coordinate.lon / COORDINATE_PRECISION};
// upper bound pruning technique
upper_bound<float> pruning_bound(max_number_of_phantom_nodes);
// initialize queue with root element // initialize queue with root element
std::priority_queue<IncrementalQueryCandidate> traversal_queue; std::priority_queue<IncrementalQueryCandidate> traversal_queue;
traversal_queue.emplace(0.f, m_search_tree[0]); traversal_queue.emplace(0.f, m_search_tree[0]);
@ -818,6 +805,12 @@ class StaticRTree
const IncrementalQueryCandidate current_query_node = traversal_queue.top(); const IncrementalQueryCandidate current_query_node = traversal_queue.top();
traversal_queue.pop(); traversal_queue.pop();
if (current_query_node.min_dist > max_distance ||
inspected_elements >= max_checked_elements)
{
break;
}
if (current_query_node.node.template is<TreeNode>()) if (current_query_node.node.template is<TreeNode>())
{ // current object is a tree node { // current object is a tree node
const TreeNode &current_tree_node = const TreeNode &current_tree_node =
@ -839,16 +832,9 @@ class StaticRTree
// distance must be non-negative // distance must be non-negative
BOOST_ASSERT(0.f <= current_perpendicular_distance); BOOST_ASSERT(0.f <= current_perpendicular_distance);
if (pruning_bound.get() >= current_perpendicular_distance || if (current_perpendicular_distance <= max_distance)
(!has_big_cc && !current_edge.is_in_tiny_cc()))
{ {
pruning_bound.insert(current_perpendicular_distance);
traversal_queue.emplace(current_perpendicular_distance, current_edge); traversal_queue.emplace(current_perpendicular_distance, current_edge);
has_big_cc = has_big_cc || !current_edge.is_in_tiny_cc();
}
else
{
++pruned_elements;
} }
} }
} }
@ -865,10 +851,13 @@ class StaticRTree
child_rectangle.GetMinDist(input_coordinate); child_rectangle.GetMinDist(input_coordinate);
BOOST_ASSERT(0.f <= lower_bound_to_element); BOOST_ASSERT(0.f <= lower_bound_to_element);
if (lower_bound_to_element <= max_distance)
{
traversal_queue.emplace(lower_bound_to_element, child_tree_node); traversal_queue.emplace(lower_bound_to_element, child_tree_node);
} }
} }
} }
}
else else
{ // current object is a leaf node { // current object is a leaf node
++inspected_elements; ++inspected_elements;
@ -876,14 +865,6 @@ class StaticRTree
const EdgeDataT &current_segment = const EdgeDataT &current_segment =
current_query_node.node.template get<EdgeDataT>(); current_query_node.node.template get<EdgeDataT>();
// continue searching for the first segment from a big component
if (number_of_elements_from_big_cc == 0 &&
number_of_elements_from_tiny_cc >= max_number_of_phantom_nodes - 1 &&
current_segment.is_in_tiny_cc())
{
continue;
}
// check if it is smaller than what we had before // check if it is smaller than what we had before
float current_ratio = 0.f; float current_ratio = 0.f;
FixedPointCoordinate foot_point_coordinate_on_segment; FixedPointCoordinate foot_point_coordinate_on_segment;
@ -894,9 +875,7 @@ class StaticRTree
m_coordinate_list->at(current_segment.v), input_coordinate, m_coordinate_list->at(current_segment.v), input_coordinate,
projected_coordinate, foot_point_coordinate_on_segment, current_ratio); projected_coordinate, foot_point_coordinate_on_segment, current_ratio);
if (number_of_elements_from_big_cc > 0 && if (current_perpendicular_distance >= max_distance)
result_phantom_node_vector.size() >= min_number_of_phantom_nodes &&
current_perpendicular_distance >= max_distance)
{ {
traversal_queue = std::priority_queue<IncrementalQueryCandidate>{}; traversal_queue = std::priority_queue<IncrementalQueryCandidate>{};
continue; continue;
@ -920,36 +899,14 @@ class StaticRTree
// set forward and reverse weights on the phantom node // set forward and reverse weights on the phantom node
SetForwardAndReverseWeightsOnPhantomNode(current_segment, SetForwardAndReverseWeightsOnPhantomNode(current_segment,
result_phantom_node_vector.back().first); result_phantom_node_vector.back().first);
// update counts on what we found from which result class
if (current_segment.is_in_tiny_cc())
{ // found an element in tiny component
++number_of_elements_from_tiny_cc;
}
else
{ // found an element in a big component
++number_of_elements_from_big_cc;
}
} }
// stop the search by flushing the queue // stop the search by flushing the queue
if ((result_phantom_node_vector.size() >= max_number_of_phantom_nodes && if (inspected_elements >= max_checked_elements)
number_of_elements_from_big_cc > 0) ||
inspected_elements >= max_checked_elements)
{ {
traversal_queue = std::priority_queue<IncrementalQueryCandidate>{}; traversal_queue = std::priority_queue<IncrementalQueryCandidate>{};
} }
} }
// SimpleLogger().Write() << "result_phantom_node_vector.size(): " <<
// result_phantom_node_vector.size();
// SimpleLogger().Write() << "max_number_of_phantom_nodes: " << max_number_of_phantom_nodes;
// SimpleLogger().Write() << "number_of_elements_from_big_cc: " <<
// number_of_elements_from_big_cc;
// SimpleLogger().Write() << "number_of_elements_from_tiny_cc: " <<
// number_of_elements_from_tiny_cc;
// SimpleLogger().Write() << "inspected_elements: " << inspected_elements;
// SimpleLogger().Write() << "max_checked_elements: " << max_checked_elements;
// SimpleLogger().Write() << "pruned_elements: " << pruned_elements;
return !result_phantom_node_vector.empty(); return !result_phantom_node_vector.empty();
} }

View File

@ -164,6 +164,10 @@ int main(const int argc, const char *argv[])
{ {
throw osrm::exception("no geometry file found"); throw osrm::exception("no geometry file found");
} }
if (server_paths.find("core") == server_paths.end())
{
throw osrm::exception("no core file found");
}
ServerPaths::const_iterator paths_iterator = server_paths.find("hsgrdata"); ServerPaths::const_iterator paths_iterator = server_paths.find("hsgrdata");
BOOST_ASSERT(server_paths.end() != paths_iterator); BOOST_ASSERT(server_paths.end() != paths_iterator);
@ -199,6 +203,10 @@ int main(const int argc, const char *argv[])
BOOST_ASSERT(server_paths.end() != paths_iterator); BOOST_ASSERT(server_paths.end() != paths_iterator);
BOOST_ASSERT(!paths_iterator->second.empty()); BOOST_ASSERT(!paths_iterator->second.empty());
const boost::filesystem::path &geometries_data_path = paths_iterator->second; const boost::filesystem::path &geometries_data_path = paths_iterator->second;
paths_iterator = server_paths.find("core");
BOOST_ASSERT(server_paths.end() != paths_iterator);
BOOST_ASSERT(!paths_iterator->second.empty());
const boost::filesystem::path &core_marker_path = paths_iterator->second;
// determine segment to use // determine segment to use
bool segment2_in_use = SharedMemory::RegionExists(LAYOUT_2); bool segment2_in_use = SharedMemory::RegionExists(LAYOUT_2);
@ -222,8 +230,7 @@ int main(const int argc, const char *argv[])
// Allocate a memory layout in shared memory, deallocate previous // Allocate a memory layout in shared memory, deallocate previous
SharedMemory *layout_memory = SharedMemory *layout_memory =
SharedMemoryFactory::Get(layout_region, sizeof(SharedDataLayout)); SharedMemoryFactory::Get(layout_region, sizeof(SharedDataLayout));
SharedDataLayout *shared_layout_ptr = static_cast<SharedDataLayout *>(layout_memory->Ptr()); SharedDataLayout *shared_layout_ptr = new (layout_memory->Ptr()) SharedDataLayout();
shared_layout_ptr = new (layout_memory->Ptr()) SharedDataLayout();
shared_layout_ptr->SetBlockSize<char>(SharedDataLayout::FILE_INDEX_PATH, shared_layout_ptr->SetBlockSize<char>(SharedDataLayout::FILE_INDEX_PATH,
file_index_path.length() + 1); file_index_path.length() + 1);
@ -329,6 +336,13 @@ int main(const int argc, const char *argv[])
} }
shared_layout_ptr->SetBlockSize<char>(SharedDataLayout::TIMESTAMP, m_timestamp.length()); shared_layout_ptr->SetBlockSize<char>(SharedDataLayout::TIMESTAMP, m_timestamp.length());
// load core marker size
boost::filesystem::ifstream core_marker_file(core_marker_path, std::ios::binary);
uint32_t number_of_core_markers = 0;
core_marker_file.read((char *)&number_of_core_markers, sizeof(uint32_t));
shared_layout_ptr->SetBlockSize<unsigned>(SharedDataLayout::CORE_MARKER, number_of_core_markers);
// load coordinate size // load coordinate size
boost::filesystem::ifstream nodes_input_stream(nodes_data_path, std::ios::binary); boost::filesystem::ifstream nodes_input_stream(nodes_data_path, std::ios::binary);
unsigned coordinate_list_size = 0; unsigned coordinate_list_size = 0;
@ -509,6 +523,35 @@ int main(const int argc, const char *argv[])
} }
tree_node_file.close(); tree_node_file.close();
// load core markers
std::vector<char> unpacked_core_markers(number_of_core_markers);
core_marker_file.read((char *)unpacked_core_markers.data(), sizeof(char)*number_of_core_markers);
unsigned *core_marker_ptr = shared_layout_ptr->GetBlockPtr<unsigned, true>(
shared_memory_ptr, SharedDataLayout::CORE_MARKER);
for (auto i = 0u; i < number_of_core_markers; ++i)
{
BOOST_ASSERT(unpacked_core_markers[i] == 0 || unpacked_core_markers[i] == 1);
if (unpacked_core_markers[i] == 1)
{
const unsigned bucket = i / 32;
const unsigned offset = i % 32;
const unsigned value = [&]
{
unsigned return_value = 0;
if (0 != offset)
{
return_value = core_marker_ptr[bucket];
}
return return_value;
}();
core_marker_ptr[bucket] = (value | (1 << offset));
}
}
// load the nodes of the search graph // load the nodes of the search graph
QueryGraph::NodeArrayEntry *graph_node_list_ptr = QueryGraph::NodeArrayEntry *graph_node_list_ptr =
shared_layout_ptr->GetBlockPtr<QueryGraph::NodeArrayEntry, true>( shared_layout_ptr->GetBlockPtr<QueryGraph::NodeArrayEntry, true>(

View File

@ -28,7 +28,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "description_factory.hpp" #include "description_factory.hpp"
#include "../algorithms/polyline_formatter.hpp" #include "../algorithms/polyline_formatter.hpp"
#include "../data_structures/coordinate_calculation.hpp" #include "../algorithms/coordinate_calculation.hpp"
#include "../data_structures/internal_route_result.hpp" #include "../data_structures/internal_route_result.hpp"
#include "../data_structures/turn_instructions.hpp" #include "../data_structures/turn_instructions.hpp"
#include "../util/container.hpp" #include "../util/container.hpp"

View File

@ -28,7 +28,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef DESCRIPTOR_BASE_HPP #ifndef DESCRIPTOR_BASE_HPP
#define DESCRIPTOR_BASE_HPP #define DESCRIPTOR_BASE_HPP
#include "../data_structures/coordinate_calculation.hpp" #include "../algorithms/coordinate_calculation.hpp"
#include "../data_structures/internal_route_result.hpp" #include "../data_structures/internal_route_result.hpp"
#include "../data_structures/phantom_node.hpp" #include "../data_structures/phantom_node.hpp"
#include "../typedefs.h" #include "../typedefs.h"

View File

@ -52,15 +52,8 @@ template <class DataFacadeT> class JSONDescriptor final : public BaseDescriptor<
DescriptorConfig config; DescriptorConfig config;
DescriptionFactory description_factory, alternate_description_factory; DescriptionFactory description_factory, alternate_description_factory;
FixedPointCoordinate current; FixedPointCoordinate current;
unsigned entered_restricted_area_count;
struct RoundAbout
{
RoundAbout() : start_index(INT_MAX), name_id(INVALID_NAMEID), leave_at_exit(INT_MAX) {}
int start_index;
unsigned name_id;
int leave_at_exit;
} round_about;
public:
struct Segment struct Segment
{ {
Segment() : name_id(INVALID_NAMEID), length(-1), position(0) {} Segment() : name_id(INVALID_NAMEID), length(-1), position(0) {}
@ -69,11 +62,12 @@ template <class DataFacadeT> class JSONDescriptor final : public BaseDescriptor<
int length; int length;
unsigned position; unsigned position;
}; };
private:
std::vector<Segment> shortest_path_segments, alternative_path_segments; std::vector<Segment> shortest_path_segments, alternative_path_segments;
ExtractRouteNames<DataFacadeT, Segment> GenerateRouteNames; ExtractRouteNames<DataFacadeT, Segment> GenerateRouteNames;
public: public:
explicit JSONDescriptor(DataFacadeT *facade) : facade(facade), entered_restricted_area_count(0) explicit JSONDescriptor(DataFacadeT *facade) : facade(facade)
{ {
} }
@ -143,9 +137,7 @@ template <class DataFacadeT> class JSONDescriptor final : public BaseDescriptor<
} }
if (config.instructions) if (config.instructions)
{ {
osrm::json::Array json_route_instructions; osrm::json::Array json_route_instructions = BuildTextualDescription(description_factory, shortest_path_segments);
BuildTextualDescription(description_factory, json_route_instructions,
raw_route.shortest_path_length, shortest_path_segments);
json_result.values["route_instructions"] = json_route_instructions; json_result.values["route_instructions"] = json_route_instructions;
} }
description_factory.BuildRouteSummary(description_factory.get_entire_length(), description_factory.BuildRouteSummary(description_factory.get_entire_length(),
@ -222,9 +214,7 @@ template <class DataFacadeT> class JSONDescriptor final : public BaseDescriptor<
osrm::json::Array json_current_alt_instructions; osrm::json::Array json_current_alt_instructions;
if (config.instructions) if (config.instructions)
{ {
BuildTextualDescription( json_alt_instructions = BuildTextualDescription(alternate_description_factory, alternative_path_segments);
alternate_description_factory, json_current_alt_instructions,
raw_route.alternative_path_length, alternative_path_segments);
json_alt_instructions.values.push_back(json_current_alt_instructions); json_alt_instructions.values.push_back(json_current_alt_instructions);
json_result.values["alternative_instructions"] = json_alt_instructions; json_result.values["alternative_instructions"] = json_alt_instructions;
} }
@ -276,6 +266,11 @@ template <class DataFacadeT> class JSONDescriptor final : public BaseDescriptor<
json_result.values["alternative_names"] = json_alternate_names_array; json_result.values["alternative_names"] = json_alternate_names_array;
} }
json_result.values["hint_data"] = BuildHintData(raw_route);
}
inline osrm::json::Object BuildHintData(const InternalRouteResult& raw_route) const
{
osrm::json::Object json_hint_object; osrm::json::Object json_hint_object;
json_hint_object.values["checksum"] = facade->GetCheckSum(); json_hint_object.values["checksum"] = facade->GetCheckSum();
osrm::json::Array json_location_hint_array; osrm::json::Array json_location_hint_array;
@ -290,24 +285,27 @@ template <class DataFacadeT> class JSONDescriptor final : public BaseDescriptor<
hint); hint);
json_location_hint_array.values.push_back(hint); json_location_hint_array.values.push_back(hint);
json_hint_object.values["locations"] = json_location_hint_array; json_hint_object.values["locations"] = json_location_hint_array;
json_result.values["hint_data"] = json_hint_object;
// render the content to the output array return json_hint_object;
// TIMER_START(route_render);
// osrm::json::render(reply.content, json_result);
// TIMER_STOP(route_render);
// SimpleLogger().Write(logDEBUG) << "rendering took: " << TIMER_MSEC(route_render);
} }
// TODO: reorder parameters inline osrm::json::Array BuildTextualDescription(const DescriptionFactory &description_factory,
inline void BuildTextualDescription(DescriptionFactory &description_factory, std::vector<Segment> &route_segments_list) const
osrm::json::Array &json_instruction_array,
const int route_length,
std::vector<Segment> &route_segments_list)
{ {
osrm::json::Array json_instruction_array;
// Segment information has following format: // Segment information has following format:
//["instruction id","streetname",length,position,time,"length","earth_direction",azimuth] //["instruction id","streetname",length,position,time,"length","earth_direction",azimuth]
unsigned necessary_segments_running_index = 0; unsigned necessary_segments_running_index = 0;
struct RoundAbout
{
RoundAbout() : start_index(INT_MAX), name_id(INVALID_NAMEID), leave_at_exit(INT_MAX) {}
int start_index;
unsigned name_id;
int leave_at_exit;
} round_about;
round_about.leave_at_exit = 0; round_about.leave_at_exit = 0;
round_about.name_id = 0; round_about.name_id = 0;
std::string temp_dist, temp_length, temp_duration, temp_bearing, temp_instruction; std::string temp_dist, temp_length, temp_duration, temp_bearing, temp_instruction;
@ -317,7 +315,6 @@ template <class DataFacadeT> class JSONDescriptor final : public BaseDescriptor<
{ {
osrm::json::Array json_instruction_row; osrm::json::Array json_instruction_row;
TurnInstruction current_instruction = segment.turn_instruction; TurnInstruction current_instruction = segment.turn_instruction;
entered_restricted_area_count += (current_instruction != segment.turn_instruction);
if (TurnInstructionsClass::TurnIsNecessary(current_instruction)) if (TurnInstructionsClass::TurnIsNecessary(current_instruction))
{ {
if (TurnInstruction::EnterRoundAbout == current_instruction) if (TurnInstruction::EnterRoundAbout == current_instruction)
@ -386,6 +383,8 @@ template <class DataFacadeT> class JSONDescriptor final : public BaseDescriptor<
json_last_instruction_row.values.push_back(bearing::get(0.0)); json_last_instruction_row.values.push_back(bearing::get(0.0));
json_last_instruction_row.values.push_back(0.); json_last_instruction_row.values.push_back(0.);
json_instruction_array.values.push_back(json_last_instruction_row); json_instruction_array.values.push_back(json_last_instruction_row);
return json_instruction_array;
} }
}; };

21
docker/Dockerfile Normal file
View File

@ -0,0 +1,21 @@
FROM ubuntu:14.04
RUN apt-get update -y
RUN apt-get install -y build-essential git-core python-pip python-software-properties software-properties-common
RUN apt-get -y install gcc-4.8 g++-4.8 libboost1.55-all-dev llvm-3.4
RUN apt-get -y install libbz2-dev libstxxl-dev libstxxl1 libxml2-dev
RUN apt-get -y install libzip-dev lua5.1 liblua5.1-0-dev libtbb-dev libgdal-dev ruby1.9
RUN apt-get -y install curl cmake cmake-curses-gui
RUN pip install awscli
# luabind
RUN curl https://gist.githubusercontent.com/DennisOSRM/f2eb7b948e6fe1ae319e/raw/install-luabind.sh | sudo bash
# osmosis
RUN curl -s https://gist.githubusercontent.com/DennisOSRM/803a64a9178ec375069f/raw/ | sudo bash
RUN useradd -ms /bin/bash mapbox
USER mapbox
ENV HOME /home/mapbox
WORKDIR /home/mapbox

6
docker/README Normal file
View File

@ -0,0 +1,6 @@
# Docker based continious integration
Run ```./docker/build-image.sh``` to build a docker image.
The image contains all the build dependencies and the state of the local git repository.
Run ```./docker/run-gcc.sh``` to build OSRM with g++ and run all tests.

9
docker/build-image.sh Executable file
View File

@ -0,0 +1,9 @@
#!/usr/bin/env bash
set -e
set -o pipefail
docker build \
-t mapbox/osrm:linux \
docker/

11
docker/run-clang.sh Executable file
View File

@ -0,0 +1,11 @@
#!/usr/bin/env bash
set -e
set -o pipefail
docker run \
-i \
-e "CXX=clang++" \
-v `pwd`:/home/mapbox/osrm-backend \
-t mapbox/osrm:linux \
osrm-backend/docker/test.sh

11
docker/run-gcc.sh Executable file
View File

@ -0,0 +1,11 @@
#!/usr/bin/env bash
set -e
set -o pipefail
docker run \
-i \
-e "CXX=g++" \
-v `pwd`:/home/mapbox/osrm-backend \
-t mapbox/osrm:linux \
osrm-backend/docker/test.sh

22
docker/test.sh Executable file
View File

@ -0,0 +1,22 @@
#!/usr/bin/env bash
set -e
set -o pipefail
export CMAKEOPTIONS="-DCMAKE_BUILD_TYPE=Release"
export PATH=$PATH:/home/mapbox/.gem/ruby/1.9.1/bin:/home/mapbox/osrm-backend/vendor/bundle/ruby/1.9.1/bin
cd /home/mapbox/osrm-backend
gem install --user-install bundler
bundle install --path vendor/bundle
[ -d build ] && rm -rf build
mkdir -p build
cd build
cmake .. $CMAKEOPTIONS -DBUILD_TOOLS=1
make -j`nproc`
make tests -j`nproc`
./datastructure-tests
./algorithm-tests
cd ..
bundle exec cucumber -p verify

View File

@ -28,7 +28,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "extraction_containers.hpp" #include "extraction_containers.hpp"
#include "extraction_way.hpp" #include "extraction_way.hpp"
#include "../data_structures/coordinate_calculation.hpp" #include "../algorithms/coordinate_calculation.hpp"
#include "../data_structures/node_id.hpp" #include "../data_structures/node_id.hpp"
#include "../data_structures/range_table.hpp" #include "../data_structures/range_table.hpp"
@ -36,10 +36,14 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../util/simple_logger.hpp" #include "../util/simple_logger.hpp"
#include "../util/timing_util.hpp" #include "../util/timing_util.hpp"
#include "../util/fingerprint.hpp" #include "../util/fingerprint.hpp"
#include "../util/lua_util.hpp"
#include <boost/assert.hpp> #include <boost/assert.hpp>
#include <boost/filesystem.hpp> #include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp> #include <boost/filesystem/fstream.hpp>
#include <boost/ref.hpp>
#include <luabind/luabind.hpp>
#include <stxxl/sort> #include <stxxl/sort>
@ -76,7 +80,8 @@ ExtractionContainers::~ExtractionContainers()
*/ */
void ExtractionContainers::PrepareData(const std::string &output_file_name, void ExtractionContainers::PrepareData(const std::string &output_file_name,
const std::string &restrictions_file_name, const std::string &restrictions_file_name,
const std::string &name_file_name) const std::string &name_file_name,
lua_State *segment_state)
{ {
try try
{ {
@ -87,7 +92,7 @@ void ExtractionContainers::PrepareData(const std::string &output_file_name,
PrepareNodes(); PrepareNodes();
WriteNodes(file_out_stream); WriteNodes(file_out_stream);
PrepareEdges(); PrepareEdges(segment_state);
WriteEdges(file_out_stream); WriteEdges(file_out_stream);
file_out_stream.close(); file_out_stream.close();
@ -152,23 +157,47 @@ void ExtractionContainers::PrepareNodes()
TIMER_STOP(erasing_dups); TIMER_STOP(erasing_dups);
std::cout << "ok, after " << TIMER_SEC(erasing_dups) << "s" << std::endl; std::cout << "ok, after " << TIMER_SEC(erasing_dups) << "s" << std::endl;
std::cout << "[extractor] Building node id map ... " << std::flush;
TIMER_START(id_map);
external_to_internal_node_id_map.reserve(used_node_id_list.size());
for (NodeID i = 0u; i < used_node_id_list.size(); ++i)
external_to_internal_node_id_map[used_node_id_list[i]] = i;
TIMER_STOP(id_map);
std::cout << "ok, after " << TIMER_SEC(id_map) << "s" << std::endl;
std::cout << "[extractor] Sorting all nodes ... " << std::flush; std::cout << "[extractor] Sorting all nodes ... " << std::flush;
TIMER_START(sorting_nodes); TIMER_START(sorting_nodes);
stxxl::sort(all_nodes_list.begin(), all_nodes_list.end(), ExternalMemoryNodeSTXXLCompare(), stxxl::sort(all_nodes_list.begin(), all_nodes_list.end(), ExternalMemoryNodeSTXXLCompare(),
stxxl_memory); stxxl_memory);
TIMER_STOP(sorting_nodes); TIMER_STOP(sorting_nodes);
std::cout << "ok, after " << TIMER_SEC(sorting_nodes) << "s" << std::endl; std::cout << "ok, after " << TIMER_SEC(sorting_nodes) << "s" << std::endl;
std::cout << "[extractor] Building node id map ... " << std::flush;
TIMER_START(id_map);
external_to_internal_node_id_map.reserve(used_node_id_list.size());
auto node_iter = all_nodes_list.begin();
auto ref_iter = used_node_id_list.begin();
const auto all_nodes_list_end = all_nodes_list.end();
const auto used_node_id_list_end = used_node_id_list.end();
auto internal_id = 0u;
// compute the intersection of nodes that were referenced and nodes we actually have
while (node_iter != all_nodes_list_end && ref_iter != used_node_id_list_end)
{
if (node_iter->node_id < *ref_iter)
{
node_iter++;
continue;
}
if (node_iter->node_id > *ref_iter)
{
ref_iter++;
continue;
}
BOOST_ASSERT(node_iter->node_id == *ref_iter);
external_to_internal_node_id_map[*ref_iter] = internal_id++;
node_iter++;
ref_iter++;
}
max_internal_node_id = internal_id;
TIMER_STOP(id_map);
std::cout << "ok, after " << TIMER_SEC(id_map) << "s" << std::endl;
} }
void ExtractionContainers::PrepareEdges() void ExtractionContainers::PrepareEdges(lua_State *segment_state)
{ {
// Sort edges by start. // Sort edges by start.
std::cout << "[extractor] Sorting edges by start ... " << std::flush; std::cout << "[extractor] Sorting edges by start ... " << std::flush;
@ -182,10 +211,15 @@ void ExtractionContainers::PrepareEdges()
// Traverse list of edges and nodes in parallel and set start coord // Traverse list of edges and nodes in parallel and set start coord
auto node_iterator = all_nodes_list.begin(); auto node_iterator = all_nodes_list.begin();
auto edge_iterator = all_edges_list.begin(); auto edge_iterator = all_edges_list.begin();
while (edge_iterator != all_edges_list.end() && node_iterator != all_nodes_list.end())
const auto all_edges_list_end = all_edges_list.end();
const auto all_nodes_list_end = all_nodes_list.end();
while (edge_iterator != all_edges_list_end && node_iterator != all_nodes_list_end)
{ {
if (edge_iterator->result.source < node_iterator->node_id) if (edge_iterator->result.source < node_iterator->node_id)
{ {
SimpleLogger().Write(LogLevel::logWARNING) << "Found invalid node reference " << edge_iterator->result.source;
edge_iterator->result.source = SPECIAL_NODEID; edge_iterator->result.source = SPECIAL_NODEID;
++edge_iterator; ++edge_iterator;
continue; continue;
@ -232,7 +266,10 @@ void ExtractionContainers::PrepareEdges()
TIMER_START(compute_weights); TIMER_START(compute_weights);
node_iterator = all_nodes_list.begin(); node_iterator = all_nodes_list.begin();
edge_iterator = all_edges_list.begin(); edge_iterator = all_edges_list.begin();
while (edge_iterator != all_edges_list.end() && node_iterator != all_nodes_list.end()) const auto all_edges_list_end_ = all_edges_list.end();
const auto all_nodes_list_end_ = all_nodes_list.end();
while (edge_iterator != all_edges_list_end_ && node_iterator != all_nodes_list_end_)
{ {
// skip all invalid edges // skip all invalid edges
if (edge_iterator->result.source == SPECIAL_NODEID) if (edge_iterator->result.source == SPECIAL_NODEID)
@ -243,6 +280,7 @@ void ExtractionContainers::PrepareEdges()
if (edge_iterator->result.target < node_iterator->node_id) if (edge_iterator->result.target < node_iterator->node_id)
{ {
SimpleLogger().Write(LogLevel::logWARNING) << "Found invalid node reference " << edge_iterator->result.target;
edge_iterator->result.target = SPECIAL_NODEID; edge_iterator->result.target = SPECIAL_NODEID;
++edge_iterator; ++edge_iterator;
continue; continue;
@ -262,6 +300,16 @@ void ExtractionContainers::PrepareEdges()
edge_iterator->source_coordinate.lat, edge_iterator->source_coordinate.lon, edge_iterator->source_coordinate.lat, edge_iterator->source_coordinate.lon,
node_iterator->lat, node_iterator->lon); node_iterator->lat, node_iterator->lon);
if (lua_function_exists(segment_state, "segment_function"))
{
luabind::call_function<void>(
segment_state, "segment_function",
boost::cref(edge_iterator->source_coordinate),
boost::cref(*node_iterator),
distance,
boost::ref(edge_iterator->weight_data));
}
const double weight = [distance](const InternalExtractorEdge::WeightData& data) { const double weight = [distance](const InternalExtractorEdge::WeightData& data) {
switch (data.type) switch (data.type)
{ {
@ -427,15 +475,20 @@ void ExtractionContainers::WriteEdges(std::ofstream& file_out_stream) const
void ExtractionContainers::WriteNodes(std::ofstream& file_out_stream) const void ExtractionContainers::WriteNodes(std::ofstream& file_out_stream) const
{ {
unsigned number_of_used_nodes = 0;
// write dummy value, will be overwritten later // write dummy value, will be overwritten later
file_out_stream.write((char *)&number_of_used_nodes, sizeof(unsigned)); std::cout << "[extractor] setting number of nodes ... " << std::flush;
file_out_stream.write((char *)&max_internal_node_id, sizeof(unsigned));
std::cout << "ok" << std::endl;
std::cout << "[extractor] Confirming/Writing used nodes ... " << std::flush; std::cout << "[extractor] Confirming/Writing used nodes ... " << std::flush;
TIMER_START(write_nodes); TIMER_START(write_nodes);
// identify all used nodes by a merging step of two sorted lists // identify all used nodes by a merging step of two sorted lists
auto node_iterator = all_nodes_list.begin(); auto node_iterator = all_nodes_list.begin();
auto node_id_iterator = used_node_id_list.begin(); auto node_id_iterator = used_node_id_list.begin();
while (node_id_iterator != used_node_id_list.end() && node_iterator != all_nodes_list.end()) const auto used_node_id_list_end = used_node_id_list.end();
const auto all_nodes_list_end = all_nodes_list.end();
while (node_id_iterator != used_node_id_list_end && node_iterator != all_nodes_list_end)
{ {
if (*node_id_iterator < node_iterator->node_id) if (*node_id_iterator < node_iterator->node_id)
{ {
@ -451,21 +504,14 @@ void ExtractionContainers::WriteNodes(std::ofstream& file_out_stream) const
file_out_stream.write((char *)&(*node_iterator), sizeof(ExternalMemoryNode)); file_out_stream.write((char *)&(*node_iterator), sizeof(ExternalMemoryNode));
++number_of_used_nodes;
++node_id_iterator; ++node_id_iterator;
++node_iterator; ++node_iterator;
} }
TIMER_STOP(write_nodes); TIMER_STOP(write_nodes);
std::cout << "ok, after " << TIMER_SEC(write_nodes) << "s" << std::endl; std::cout << "ok, after " << TIMER_SEC(write_nodes) << "s" << std::endl;
std::cout << "[extractor] setting number of nodes ... " << std::flush;
std::ios::pos_type previous_file_position = file_out_stream.tellp();
file_out_stream.seekp(std::ios::beg + sizeof(FingerPrint));
file_out_stream.write((char *)&number_of_used_nodes, sizeof(unsigned));
file_out_stream.seekp(previous_file_position);
std::cout << "ok" << std::endl;
SimpleLogger().Write() << "Processed " << number_of_used_nodes << " nodes"; SimpleLogger().Write() << "Processed " << max_internal_node_id << " nodes";
} }
void ExtractionContainers::WriteRestrictions(const std::string& path) const void ExtractionContainers::WriteRestrictions(const std::string& path) const
@ -517,9 +563,11 @@ void ExtractionContainers::PrepareRestrictions()
TIMER_START(fix_restriction_starts); TIMER_START(fix_restriction_starts);
auto restrictions_iterator = restrictions_list.begin(); auto restrictions_iterator = restrictions_list.begin();
auto way_start_and_end_iterator = way_start_end_id_list.cbegin(); auto way_start_and_end_iterator = way_start_end_id_list.cbegin();
const auto restrictions_list_end = restrictions_list.end();
const auto way_start_end_id_list_end = way_start_end_id_list.cend();
while (way_start_and_end_iterator != way_start_end_id_list.cend() && while (way_start_and_end_iterator != way_start_end_id_list_end &&
restrictions_iterator != restrictions_list.end()) restrictions_iterator != restrictions_list_end)
{ {
if (way_start_and_end_iterator->way_id < restrictions_iterator->restriction.from.way) if (way_start_and_end_iterator->way_id < restrictions_iterator->restriction.from.way)
{ {
@ -583,8 +631,11 @@ void ExtractionContainers::PrepareRestrictions()
TIMER_START(fix_restriction_ends); TIMER_START(fix_restriction_ends);
restrictions_iterator = restrictions_list.begin(); restrictions_iterator = restrictions_list.begin();
way_start_and_end_iterator = way_start_end_id_list.cbegin(); way_start_and_end_iterator = way_start_end_id_list.cbegin();
while (way_start_and_end_iterator != way_start_end_id_list.cend() && const auto way_start_end_id_list_end_ = way_start_end_id_list.cend();
restrictions_iterator != restrictions_list.end()) const auto restrictions_list_end_ = restrictions_list.end();
while (way_start_and_end_iterator != way_start_end_id_list_end_ &&
restrictions_iterator != restrictions_list_end_)
{ {
if (way_start_and_end_iterator->way_id < restrictions_iterator->restriction.to.way) if (way_start_and_end_iterator->way_id < restrictions_iterator->restriction.to.way)
{ {

View File

@ -30,6 +30,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "internal_extractor_edge.hpp" #include "internal_extractor_edge.hpp"
#include "first_and_last_segment_of_way.hpp" #include "first_and_last_segment_of_way.hpp"
#include "scripting_environment.hpp"
#include "../data_structures/external_memory_node.hpp" #include "../data_structures/external_memory_node.hpp"
#include "../data_structures/restriction.hpp" #include "../data_structures/restriction.hpp"
@ -53,7 +54,7 @@ class ExtractionContainers
#endif #endif
void PrepareNodes(); void PrepareNodes();
void PrepareRestrictions(); void PrepareRestrictions();
void PrepareEdges(); void PrepareEdges(lua_State *segment_state);
void WriteNodes(std::ofstream& file_out_stream) const; void WriteNodes(std::ofstream& file_out_stream) const;
void WriteRestrictions(const std::string& restrictions_file_name) const; void WriteRestrictions(const std::string& restrictions_file_name) const;
@ -74,6 +75,7 @@ class ExtractionContainers
STXXLRestrictionsVector restrictions_list; STXXLRestrictionsVector restrictions_list;
STXXLWayIDStartEndVector way_start_end_id_list; STXXLWayIDStartEndVector way_start_end_id_list;
std::unordered_map<NodeID, NodeID> external_to_internal_node_id_map; std::unordered_map<NodeID, NodeID> external_to_internal_node_id_map;
unsigned max_internal_node_id;
ExtractionContainers(); ExtractionContainers();
@ -81,7 +83,8 @@ class ExtractionContainers
void PrepareData(const std::string &output_file_name, void PrepareData(const std::string &output_file_name,
const std::string &restrictions_file_name, const std::string &restrictions_file_name,
const std::string &names_file_name); const std::string &names_file_name,
lua_State *segment_state);
}; };
#endif /* EXTRACTION_CONTAINERS_HPP */ #endif /* EXTRACTION_CONTAINERS_HPP */

View File

@ -51,7 +51,6 @@ struct ExtractionWay
duration = -1; duration = -1;
roundabout = false; roundabout = false;
is_access_restricted = false; is_access_restricted = false;
ignore_in_grid = false;
name.clear(); name.clear();
forward_travel_mode = TRAVEL_MODE_DEFAULT; forward_travel_mode = TRAVEL_MODE_DEFAULT;
backward_travel_mode = TRAVEL_MODE_DEFAULT; backward_travel_mode = TRAVEL_MODE_DEFAULT;
@ -121,7 +120,6 @@ struct ExtractionWay
std::string name; std::string name;
bool roundabout; bool roundabout;
bool is_access_restricted; bool is_access_restricted;
bool ignore_in_grid;
TravelMode forward_travel_mode : 4; TravelMode forward_travel_mode : 4;
TravelMode backward_travel_mode : 4; TravelMode backward_travel_mode : 4;
}; };

View File

@ -34,10 +34,12 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "restriction_parser.hpp" #include "restriction_parser.hpp"
#include "scripting_environment.hpp" #include "scripting_environment.hpp"
#include "../data_structures/raster_source.hpp"
#include "../util/git_sha.hpp" #include "../util/git_sha.hpp"
#include "../util/make_unique.hpp" #include "../util/make_unique.hpp"
#include "../util/simple_logger.hpp" #include "../util/simple_logger.hpp"
#include "../util/timing_util.hpp" #include "../util/timing_util.hpp"
#include "../util/lua_util.hpp"
#include "../typedefs.h" #include "../typedefs.h"
@ -116,6 +118,17 @@ int extractor::run()
SimpleLogger().Write() << "Parsing in progress.."; SimpleLogger().Write() << "Parsing in progress..";
TIMER_START(parsing); TIMER_START(parsing);
lua_State *segment_state = scripting_environment.get_lua_state();
if (lua_function_exists(segment_state, "source_function"))
{
// bind a single instance of SourceContainer class to relevant lua state
SourceContainer sources;
luabind::globals(segment_state)["sources"] = sources;
luabind::call_function<void>(segment_state, "source_function");
}
std::string generator = header.get("generator"); std::string generator = header.get("generator");
if (generator.empty()) if (generator.empty())
{ {
@ -148,7 +161,7 @@ int extractor::run()
{ {
// create a vector of iterators into the buffer // create a vector of iterators into the buffer
std::vector<osmium::memory::Buffer::const_iterator> osm_elements; std::vector<osmium::memory::Buffer::const_iterator> osm_elements;
for (auto iter = std::begin(buffer); iter != std::end(buffer); ++iter) for (auto iter = std::begin(buffer), end = std::end(buffer); iter != end; ++iter)
{ {
osm_elements.push_back(iter); osm_elements.push_back(iter);
} }
@ -167,7 +180,7 @@ int extractor::run()
ExtractionWay result_way; ExtractionWay result_way;
lua_State *local_state = scripting_environment.get_lua_state(); lua_State *local_state = scripting_environment.get_lua_state();
for (auto x = range.begin(); x != range.end(); ++x) for (auto x = range.begin(), end = range.end(); x != end; ++x)
{ {
const auto entity = osm_elements[x]; const auto entity = osm_elements[x];
@ -238,7 +251,9 @@ int extractor::run()
extraction_containers.PrepareData(config.output_file_name, extraction_containers.PrepareData(config.output_file_name,
config.restriction_file_name, config.restriction_file_name,
config.names_file_name); config.names_file_name,
segment_state);
TIMER_STOP(extracting); TIMER_STOP(extracting);
SimpleLogger().Write() << "extraction finished after " << TIMER_SEC(extracting) << "s"; SimpleLogger().Write() << "extraction finished after " << TIMER_SEC(extracting) << "s";
SimpleLogger().Write() << "To prepare the data for routing, run: " SimpleLogger().Write() << "To prepare the data for routing, run: "

View File

@ -33,8 +33,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class extractor class extractor
{ {
public: public:
extractor(const ExtractorConfig &extractor_config) extractor(ExtractorConfig extractor_config) : config(std::move(extractor_config)) {}
: config(extractor_config) {}
int run(); int run();
private: private:
ExtractorConfig config; ExtractorConfig config;

View File

@ -183,8 +183,8 @@ void ExtractorCallbacks::ProcessWay(const osmium::Way &input_way, const Extracti
{ {
external_memory.all_edges_list.push_back(InternalExtractorEdge( external_memory.all_edges_list.push_back(InternalExtractorEdge(
first_node.ref(), last_node.ref(), name_id, backward_weight_data, first_node.ref(), last_node.ref(), name_id, backward_weight_data,
true, false, parsed_way.roundabout, parsed_way.ignore_in_grid, true, false, parsed_way.roundabout, parsed_way.is_access_restricted,
parsed_way.is_access_restricted, parsed_way.backward_travel_mode, false)); parsed_way.backward_travel_mode, false));
}); });
external_memory.way_start_end_id_list.push_back( external_memory.way_start_end_id_list.push_back(
@ -205,8 +205,8 @@ void ExtractorCallbacks::ProcessWay(const osmium::Way &input_way, const Extracti
{ {
external_memory.all_edges_list.push_back(InternalExtractorEdge( external_memory.all_edges_list.push_back(InternalExtractorEdge(
first_node.ref(), last_node.ref(), name_id, forward_weight_data, first_node.ref(), last_node.ref(), name_id, forward_weight_data,
true, !forward_only, parsed_way.roundabout, parsed_way.ignore_in_grid, true, !forward_only, parsed_way.roundabout, parsed_way.is_access_restricted,
parsed_way.is_access_restricted, parsed_way.forward_travel_mode, split_edge)); parsed_way.forward_travel_mode, split_edge));
}); });
if (split_edge) if (split_edge)
{ {
@ -216,8 +216,8 @@ void ExtractorCallbacks::ProcessWay(const osmium::Way &input_way, const Extracti
{ {
external_memory.all_edges_list.push_back(InternalExtractorEdge( external_memory.all_edges_list.push_back(InternalExtractorEdge(
first_node.ref(), last_node.ref(), name_id, backward_weight_data, first_node.ref(), last_node.ref(), name_id, backward_weight_data,
false, true, parsed_way.roundabout, parsed_way.ignore_in_grid, false, true, parsed_way.roundabout, parsed_way.is_access_restricted,
parsed_way.is_access_restricted, parsed_way.backward_travel_mode, true)); parsed_way.backward_travel_mode, true));
}); });
} }

View File

@ -35,6 +35,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <boost/assert.hpp> #include <boost/assert.hpp>
#include <osrm/coordinate.hpp> #include <osrm/coordinate.hpp>
#include <utility>
struct InternalExtractorEdge struct InternalExtractorEdge
{ {
@ -62,7 +63,7 @@ struct InternalExtractorEdge
}; };
explicit InternalExtractorEdge() explicit InternalExtractorEdge()
: result(0, 0, 0, 0, false, false, false, false, false, : result(0, 0, 0, 0, false, false, false, false,
TRAVEL_MODE_INACCESSIBLE, false) TRAVEL_MODE_INACCESSIBLE, false)
{ {
} }
@ -70,17 +71,24 @@ struct InternalExtractorEdge
explicit InternalExtractorEdge(NodeID source, explicit InternalExtractorEdge(NodeID source,
NodeID target, NodeID target,
NodeID name_id, NodeID name_id,
const WeightData& weight_data, WeightData weight_data,
bool forward, bool forward,
bool backward, bool backward,
bool roundabout, bool roundabout,
bool in_tiny_cc,
bool access_restricted, bool access_restricted,
TravelMode travel_mode, TravelMode travel_mode,
bool is_split) bool is_split)
: result(source, target, name_id, 0, forward, backward, roundabout, : result(source,
in_tiny_cc, access_restricted, travel_mode, is_split), target,
weight_data(weight_data) name_id,
0,
forward,
backward,
roundabout,
access_restricted,
travel_mode,
is_split),
weight_data(std::move(weight_data))
{ {
} }
@ -96,11 +104,11 @@ struct InternalExtractorEdge
static InternalExtractorEdge min_value() static InternalExtractorEdge min_value()
{ {
return InternalExtractorEdge(0, 0, 0, WeightData(), false, false, false, return InternalExtractorEdge(0, 0, 0, WeightData(), false, false, false,
false, false, TRAVEL_MODE_INACCESSIBLE, false); false, TRAVEL_MODE_INACCESSIBLE, false);
} }
static InternalExtractorEdge max_value() static InternalExtractorEdge max_value()
{ {
return InternalExtractorEdge(SPECIAL_NODEID, SPECIAL_NODEID, 0, WeightData(), false, false, return InternalExtractorEdge(SPECIAL_NODEID, SPECIAL_NODEID, 0, WeightData(), false,
false, false, false, TRAVEL_MODE_INACCESSIBLE, false); false, false, false, TRAVEL_MODE_INACCESSIBLE, false);
} }
}; };

0
extractor/lat Normal file
View File

View File

@ -35,6 +35,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <boost/algorithm/string.hpp> #include <boost/algorithm/string.hpp>
#include <boost/algorithm/string/regex.hpp> #include <boost/algorithm/string/regex.hpp>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/ref.hpp> #include <boost/ref.hpp>
#include <boost/regex.hpp> #include <boost/regex.hpp>
@ -51,8 +52,7 @@ int lua_error_callback(lua_State *lua_state)
} }
} }
RestrictionParser::RestrictionParser(lua_State *lua_state) RestrictionParser::RestrictionParser(lua_State *lua_state) : use_turn_restrictions(true)
: use_turn_restrictions(true)
{ {
ReadUseRestrictionsSetting(lua_state); ReadUseRestrictionsSetting(lua_state);
@ -141,17 +141,31 @@ RestrictionParser::TryParse(const osmium::Relation &relation) const
bool is_only_restriction = false; bool is_only_restriction = false;
for (auto iter = fi_begin; iter != fi_end; ++iter) for (; fi_begin != fi_end; ++fi_begin)
{ {
if (std::string("restriction") == iter->key() || const std::string key(fi_begin->key());
std::string("restriction::hgv") == iter->key()) const std::string value(fi_begin->value());
{
const std::string restriction_value(iter->value());
if (restriction_value.find("only_") == 0) if (value.find("only_") == 0)
{ {
is_only_restriction = true; is_only_restriction = true;
} }
// if the "restriction*" key is longer than 11 chars, it is a conditional exception (i.e.
// "restriction:<transportation_type>")
if (key.size() > 11)
{
const auto ex_suffix = [&](const std::string &exception)
{
return boost::algorithm::ends_with(key, exception);
};
bool is_actually_restricted =
std::any_of(begin(restriction_exceptions), end(restriction_exceptions), ex_suffix);
if (!is_actually_restricted)
{
return mapbox::util::optional<InputRestrictionContainer>();
}
} }
} }

View File

@ -30,13 +30,16 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "extraction_helper_functions.hpp" #include "extraction_helper_functions.hpp"
#include "extraction_node.hpp" #include "extraction_node.hpp"
#include "extraction_way.hpp" #include "extraction_way.hpp"
#include "internal_extractor_edge.hpp"
#include "../data_structures/external_memory_node.hpp" #include "../data_structures/external_memory_node.hpp"
#include "../data_structures/raster_source.hpp"
#include "../util/lua_util.hpp" #include "../util/lua_util.hpp"
#include "../util/osrm_exception.hpp" #include "../util/osrm_exception.hpp"
#include "../util/simple_logger.hpp" #include "../util/simple_logger.hpp"
#include "../typedefs.h" #include "../typedefs.h"
#include <luabind/tag_function.hpp> #include <luabind/tag_function.hpp>
#include <luabind/operator.hpp>
#include <osmium/osm.hpp> #include <osmium/osm.hpp>
@ -80,6 +83,13 @@ void ScriptingEnvironment::init_lua_state(lua_State *lua_state)
luabind::def("print", LUA_print<std::string>), luabind::def("print", LUA_print<std::string>),
luabind::def("durationIsValid", durationIsValid), luabind::def("durationIsValid", durationIsValid),
luabind::def("parseDuration", parseDuration), luabind::def("parseDuration", parseDuration),
luabind::class_<SourceContainer>("sources")
.def(luabind::constructor<>())
.def("load", &SourceContainer::loadRasterSource)
.def("query", &SourceContainer::getRasterDataFromSource)
.def("interpolate", &SourceContainer::getRasterInterpolateFromSource),
luabind::class_<const float>("constants")
.enum_("enums")[luabind::value("precision", COORDINATE_PRECISION)],
luabind::class_<std::vector<std::string>>("vector") luabind::class_<std::vector<std::string>>("vector")
.def("Add", static_cast<void (std::vector<std::string>::*)(const std::string &)>( .def("Add", static_cast<void (std::vector<std::string>::*)(const std::string &)>(
@ -107,7 +117,6 @@ void ScriptingEnvironment::init_lua_state(lua_State *lua_state)
.def_readwrite("name", &ExtractionWay::name) .def_readwrite("name", &ExtractionWay::name)
.def_readwrite("roundabout", &ExtractionWay::roundabout) .def_readwrite("roundabout", &ExtractionWay::roundabout)
.def_readwrite("is_access_restricted", &ExtractionWay::is_access_restricted) .def_readwrite("is_access_restricted", &ExtractionWay::is_access_restricted)
.def_readwrite("ignore_in_index", &ExtractionWay::ignore_in_grid)
.def_readwrite("duration", &ExtractionWay::duration) .def_readwrite("duration", &ExtractionWay::duration)
.property("forward_mode", &ExtractionWay::get_forward_mode, .property("forward_mode", &ExtractionWay::get_forward_mode,
&ExtractionWay::set_forward_mode) &ExtractionWay::set_forward_mode)
@ -122,7 +131,21 @@ void ScriptingEnvironment::init_lua_state(lua_State *lua_state)
luabind::class_<osmium::Way>("Way") luabind::class_<osmium::Way>("Way")
.def("get_value_by_key", &osmium::Way::get_value_by_key) .def("get_value_by_key", &osmium::Way::get_value_by_key)
.def("get_value_by_key", &get_value_by_key<osmium::Way>) .def("get_value_by_key", &get_value_by_key<osmium::Way>)
.def("id", &osmium::Way::id) .def("id", &osmium::Way::id),
luabind::class_<InternalExtractorEdge>("EdgeSource")
.property("source_coordinate", &InternalExtractorEdge::source_coordinate)
.property("weight_data", &InternalExtractorEdge::weight_data),
luabind::class_<InternalExtractorEdge::WeightData>("WeightData")
.def_readwrite("speed", &InternalExtractorEdge::WeightData::speed),
luabind::class_<ExternalMemoryNode>("EdgeTarget")
.property("lat", &ExternalMemoryNode::lat)
.property("lon", &ExternalMemoryNode::lon),
luabind::class_<FixedPointCoordinate>("Coordinate")
.property("lat", &FixedPointCoordinate::lat)
.property("lon", &FixedPointCoordinate::lon),
luabind::class_<RasterDatum>("RasterDatum")
.property("datum", &RasterDatum::datum)
.def("invalid_data", &RasterDatum::get_invalid)
]; ];
if (0 != luaL_dofile(lua_state, file_name.c_str())) if (0 != luaL_dofile(lua_state, file_name.c_str()))

View File

View File

@ -19,6 +19,7 @@ Feature: Barriers
| wall | | | wall | |
| fence | | | fence | |
| some_tag | | | some_tag | |
| block | x |
Scenario: Bike - Access tag trumphs barriers Scenario: Bike - Access tag trumphs barriers
Then routability should be Then routability should be

View File

@ -42,6 +42,6 @@ Feature: Bicycle - Handle movable bridge
When I route I should get When I route I should get
| from | to | route | modes | speed | | from | to | route | modes | speed |
| a | g | abc,cde,efg | 1,5,1 | 5 km/h | | a | g | abc,cde,efg | 1,5,1 | 5 km/h |
| b | f | abc,cde,efg | 1,5,1 | 3 km/h | | b | f | abc,cde,efg | 1,5,1 | 4 km/h |
| c | e | cde | 5 | 2 km/h | | c | e | cde | 5 | 2 km/h |
| e | c | cde | 5 | 2 km/h | | e | c | cde | 5 | 2 km/h |

View File

@ -132,6 +132,8 @@ Feature: Car - Restricted access
| residential | | yes | no | | | | residential | | yes | no | | |
| motorway | yes | permissive | | private | | | motorway | yes | permissive | | private | |
| trunk | agricultural | designated | permissive | no | | | trunk | agricultural | designated | permissive | no | |
| pedestrian | | | | | |
| pedestrian | | | | destination | x |
Scenario: Car - Ignore access tags for other modes Scenario: Car - Ignore access tags for other modes
Then routability should be Then routability should be

View File

@ -19,6 +19,7 @@ Feature: Car - Barriers
| wall | | | wall | |
| fence | | | fence | |
| some_tag | | | some_tag | |
| block | |
Scenario: Car - Access tag trumphs barriers Scenario: Car - Access tag trumphs barriers
Then routability should be Then routability should be
@ -37,3 +38,10 @@ Feature: Car - Barriers
| wall | no | | | wall | no | |
| wall | private | | | wall | private | |
| wall | agricultural | | | wall | agricultural | |
Scenario: Car - Rising bollard exception for barriers
Then routability should be
| node/barrier | node/bollard | bothw |
| bollard | | |
| bollard | rising | x |
| bollard | removable | |

View File

@ -41,7 +41,7 @@ Feature: Car - Handle movable bridge
When I route I should get When I route I should get
| from | to | route | modes | speed | | from | to | route | modes | speed |
| a | g | abc,cde,efg | 1,3,1 | 6 km/h | | a | g | abc,cde,efg | 1,3,1 | 7 km/h |
| b | f | abc,cde,efg | 1,3,1 | 4 km/h | | b | f | abc,cde,efg | 1,3,1 | 5 km/h |
| c | e | cde | 3 | 2 km/h | | c | e | cde | 3 | 2 km/h |
| e | c | cde | 3 | 2 km/h | | e | c | cde | 3 | 2 km/h |

View File

@ -41,7 +41,7 @@ Feature: Car - Handle ferry routes
When I route I should get When I route I should get
| from | to | route | modes | speed | | from | to | route | modes | speed |
| a | g | abc,cde,efg | 1,2,1 | 26 km/h | | a | g | abc,cde,efg | 1,2,1 | 25 km/h |
| b | f | abc,cde,efg | 1,2,1 | 20 km/h | | b | f | abc,cde,efg | 1,2,1 | 20 km/h |
| c | e | cde | 2 | 12 km/h | | c | e | cde | 2 | 12 km/h |
| e | c | cde | 2 | 12 km/h | | e | c | cde | 2 | 12 km/h |
@ -60,7 +60,7 @@ Feature: Car - Handle ferry routes
When I route I should get When I route I should get
| from | to | route | modes | speed | | from | to | route | modes | speed |
| a | g | abc,cde,efg | 1,2,1 | 26 km/h | | a | g | abc,cde,efg | 1,2,1 | 25 km/h |
| b | f | abc,cde,efg | 1,2,1 | 20 km/h | | b | f | abc,cde,efg | 1,2,1 | 20 km/h |
| c | e | cde | 2 | 12 km/h | | c | e | cde | 2 | 12 km/h |
| e | c | cde | 2 | 12 km/h | | e | c | cde | 2 | 12 km/h |

View File

@ -23,10 +23,10 @@ OSRM will use 4/5 of the projected free-flow speed.
| from | to | route | speed | | from | to | route | speed |
| a | b | ab | 78 km/h | | a | b | ab | 78 km/h |
| b | c | bc | 59 km/h +- 1 | | b | c | bc | 59 km/h +- 1 |
| c | d | cd | 50 km/h | | c | d | cd | 51 km/h |
| d | e | de | 75 km/h | | d | e | de | 75 km/h |
| e | f | ef | 90 km/h | | e | f | ef | 90 km/h |
| f | g | fg | 105 km/h | | f | g | fg | 106 km/h |
Scenario: Car - Do not ignore maxspeed when higher than way speed Scenario: Car - Do not ignore maxspeed when higher than way speed
Given the node map Given the node map
@ -42,7 +42,7 @@ OSRM will use 4/5 of the projected free-flow speed.
| from | to | route | speed | | from | to | route | speed |
| a | b | ab | 31 km/h | | a | b | ab | 31 km/h |
| b | c | bc | 83 km/h +- 1 | | b | c | bc | 83 km/h +- 1 |
| c | d | cd | 50 km/h | | c | d | cd | 51 km/h |
Scenario: Car - Forward/backward maxspeed Scenario: Car - Forward/backward maxspeed
Given a grid size of 100 meters Given a grid size of 100 meters
@ -119,4 +119,3 @@ OSRM will use 4/5 of the projected free-flow speed.
| primary | 30 | 1 | -1 | | 34 km/h | | primary | 30 | 1 | -1 | | 34 km/h |
| primary | 30 | 1 | | 15 km/h | 15 km/h | | primary | 30 | 1 | | 15 km/h | 15 km/h |
| primary | 30 | 2 | | 34 km/h | 34 km/h | | primary | 30 | 2 | | 34 km/h | 34 km/h |

View File

@ -226,6 +226,54 @@ Feature: Car - Turn restrictions
| s | n | sj,nj | | s | n | sj,nj |
| s | e | | | s | e | |
@specific
Scenario: Car - :hgv-qualified on a standard turn restriction
Given the node map
| | n | |
| w | j | e |
| | s | |
And the ways
| nodes | oneway |
| sj | yes |
| nj | -1 |
| wj | -1 |
| ej | -1 |
And the relations
| type | way:from | way:to | node:via | restriction:hgv |
| restriction | sj | nj | j | no_straight_on |
When I route I should get
| from | to | route |
| s | w | sj,wj |
| s | n | sj,nj |
| s | e | sj,ej |
@specific
Scenario: Car - :motorcar-qualified on a standard turn restriction
Given the node map
| | n | |
| w | j | e |
| | s | |
And the ways
| nodes | oneway |
| sj | yes |
| nj | -1 |
| wj | -1 |
| ej | -1 |
And the relations
| type | way:from | way:to | node:via | restriction:motorcar |
| restriction | sj | nj | j | no_straight_on |
When I route I should get
| from | to | route |
| s | w | sj,wj |
| s | n | |
| s | e | sj,ej |
@except @except
Scenario: Car - Except tag and on no_ restrictions Scenario: Car - Except tag and on no_ restrictions
Given the node map Given the node map

View File

@ -19,6 +19,7 @@ Feature: Barriers
| wall | | | wall | |
| fence | | | fence | |
| some_tag | | | some_tag | |
| block | x |
Scenario: Foot - Access tag trumphs barriers Scenario: Foot - Access tag trumphs barriers
Then routability should be Then routability should be

View File

@ -16,7 +16,8 @@ Feature: osrm-prepare command line options: help
And stdout should contain "--restrictions" And stdout should contain "--restrictions"
And stdout should contain "--profile" And stdout should contain "--profile"
And stdout should contain "--threads" And stdout should contain "--threads"
And stdout should contain 15 lines And stdout should contain "--core"
And stdout should contain 17 lines
And it should exit with code 1 And it should exit with code 1
Scenario: osrm-prepare - Help, short Scenario: osrm-prepare - Help, short
@ -31,7 +32,8 @@ Feature: osrm-prepare command line options: help
And stdout should contain "--restrictions" And stdout should contain "--restrictions"
And stdout should contain "--profile" And stdout should contain "--profile"
And stdout should contain "--threads" And stdout should contain "--threads"
And stdout should contain 15 lines And stdout should contain "--core"
And stdout should contain 17 lines
And it should exit with code 0 And it should exit with code 0
Scenario: osrm-prepare - Help, long Scenario: osrm-prepare - Help, long
@ -46,5 +48,6 @@ Feature: osrm-prepare command line options: help
And stdout should contain "--restrictions" And stdout should contain "--restrictions"
And stdout should contain "--profile" And stdout should contain "--profile"
And stdout should contain "--threads" And stdout should contain "--threads"
And stdout should contain 15 lines And stdout should contain "--core"
And stdout should contain 17 lines
And it should exit with code 0 And it should exit with code 0

View File

@ -0,0 +1,18 @@
@raster @extract
Feature: osrm-extract with a profile containing raster source
# expansions:
# {osm_base} => path to current input file
# {profile} => path to current profile script
Scenario: osrm-extract on a valid profile
Given the profile "rasterbot"
And the node map
| a | b |
And the ways
| nodes |
| ab |
And the data has been saved to disk
When I run "osrm-extract {osm_base}.osm -p {profile}"
Then stderr should be empty
And stdout should contain "source loader"
And it should exit with code 0

View File

@ -0,0 +1,78 @@
@routing @speed @raster
Feature: Raster - weights
Background: Use specific speeds
Given the node locations
| node | lat | lon |
| a | 0.1 | 0.1 |
| b | .05 | 0.1 |
| c | 0.0 | 0.1 |
| d | .05 | .03 |
| e | .05 | .066 |
| f | .075 | .066 |
And the ways
| nodes | highway |
| ab | primary |
| ad | primary |
| bc | primary |
| dc | primary |
| de | primary |
| eb | primary |
| df | primary |
| fb | primary |
And the raster source
"""
0 0 0 0
0 0 0 250
0 0 250 500
0 0 0 250
0 0 0 0
"""
Scenario: Weighting not based on raster sources
Given the profile "testbot"
When I run "osrm-extract {osm_base}.osm -p {profile}"
And I run "osrm-prepare {osm_base}.osm"
And I route I should get
| from | to | route | speed |
| a | b | ab | 36 km/h |
| a | c | ab,bc | 36 km/h |
| b | c | bc | 36 km/h |
| a | d | ad | 36 km/h |
| d | c | dc | 36 km/h |
Scenario: Weighting based on raster sources
Given the profile "rasterbot"
When I run "osrm-extract {osm_base}.osm -p {profile}"
Then stdout should contain "evaluating segment"
And I run "osrm-prepare {osm_base}.osm"
And I route I should get
| from | to | route | speed |
| a | b | ab | 8 km/h |
| a | c | ad,dc | 15 km/h |
| b | c | bc | 8 km/h |
| a | d | ad | 15 km/h |
| d | c | dc | 15 km/h |
| d | e | de | 10 km/h |
| e | b | eb | 10 km/h |
| d | f | df | 15 km/h |
| f | b | fb | 7 km/h |
| d | b | de,eb | 10 km/h |
Scenario: Weighting based on raster sources
Given the profile "rasterbot-interp"
When I run "osrm-extract {osm_base}.osm -p {profile}"
Then stdout should contain "evaluating segment"
And I run "osrm-prepare {osm_base}.osm"
And I route I should get
| from | to | route | speed |
| a | b | ab | 8 km/h |
| a | c | ad,dc | 15 km/h |
| b | c | bc | 8 km/h |
| a | d | ad | 15 km/h |
| d | c | dc | 15 km/h |
| d | e | de | 10 km/h |
| e | b | eb | 10 km/h |
| d | f | df | 15 km/h |
| f | b | fb | 7 km/h |
| d | b | de,eb | 10 km/h |

View File

@ -115,8 +115,8 @@ Given /^the relations$/ do |table|
raise "*** unknown relation way member '#{way_name}'" unless way raise "*** unknown relation way member '#{way_name}'" unless way
relation << OSM::Member.new( 'way', way.id, $1 ) relation << OSM::Member.new( 'way', way.id, $1 )
end end
elsif key =~ /^(.*):(.*)/ elsif key =~ /^(.*):(.*)/ && "#{$1}" != 'restriction'
raise "*** unknown relation member type '#{$1}', must be either 'node' or 'way'" raise "*** unknown relation member type '#{$1}:#{$2}', must be either 'node' or 'way'"
else else
relation << { key => value } relation << { key => value }
end end
@ -134,6 +134,12 @@ Given /^the input file ([^"]*)$/ do |file|
@osm_str = File.read file @osm_str = File.read file
end end
Given /^the raster source$/ do |data|
Dir.chdir TEST_FOLDER do
File.open("rastersource.asc", "w") {|f| f.write(data)}
end
end
Given /^the data has been saved to disk$/ do Given /^the data has been saved to disk$/ do
begin begin
write_input_data write_input_data

View File

@ -94,3 +94,170 @@ When /^I match I should get$/ do |table|
table.diff! actual table.diff! actual
end end
When /^I match with turns I should get$/ do |table|
reprocess
actual = []
OSRMLoader.load(self,"#{prepared_file}.osrm") do
table.hashes.each_with_index do |row,ri|
if row['request']
got = {'request' => row['request'] }
response = request_url row['request']
else
params = @query_params
trace = []
timestamps = []
if row['from'] and row['to']
node = find_node_by_name(row['from'])
raise "*** unknown from-node '#{row['from']}" unless node
trace << node
node = find_node_by_name(row['to'])
raise "*** unknown to-node '#{row['to']}" unless node
trace << node
got = {'from' => row['from'], 'to' => row['to'] }
response = request_matching trace, timestamps, params
elsif row['trace']
row['trace'].each_char do |n|
node = find_node_by_name(n.strip)
raise "*** unknown waypoint node '#{n.strip}" unless node
trace << node
end
if row['timestamps']
timestamps = row['timestamps'].split(" ").compact.map { |t| t.to_i}
end
got = {'trace' => row['trace'] }
response = request_matching trace, timestamps, params
else
raise "*** no trace"
end
end
row.each_pair do |k,v|
if k =~ /param:(.*)/
if v=='(nil)'
params[$1]=nil
elsif v!=nil
params[$1]=v
end
got[k]=v
end
end
if response.body.empty? == false
json = JSON.parse response.body
end
if response.body.empty? == false
if response.code == "200"
instructions = way_list json['matchings'][0]['instructions']
bearings = bearing_list json['matchings'][0]['instructions']
compasses = compass_list json['matchings'][0]['instructions']
turns = turn_list json['matchings'][0]['instructions']
modes = mode_list json['matchings'][0]['instructions']
times = time_list json['matchings'][0]['instructions']
distances = distance_list json['matchings'][0]['instructions']
end
end
if table.headers.include? 'status'
got['status'] = json['status'].to_s
end
if table.headers.include? 'message'
got['message'] = json['status_message']
end
if table.headers.include? '#' # comment column
got['#'] = row['#'] # copy value so it always match
end
sub_matchings = []
if response.code == "200"
if table.headers.include? 'matchings'
sub_matchings = json['matchings'].compact.map { |sub| sub['matched_points']}
got['route'] = (instructions || '').strip
if table.headers.include?('distance')
if row['distance']!=''
raise "*** Distance must be specied in meters. (ex: 250m)" unless row['distance'] =~ /\d+m/
end
got['distance'] = instructions ? "#{json['route_summary']['total_distance'].to_s}m" : ''
end
if table.headers.include?('time')
raise "*** Time must be specied in seconds. (ex: 60s)" unless row['time'] =~ /\d+s/
got['time'] = instructions ? "#{json['route_summary']['total_time'].to_s}s" : ''
end
if table.headers.include?('speed')
if row['speed'] != '' && instructions
raise "*** Speed must be specied in km/h. (ex: 50 km/h)" unless row['speed'] =~ /\d+ km\/h/
time = json['route_summary']['total_time']
distance = json['route_summary']['total_distance']
speed = time>0 ? (3.6*distance/time).to_i : nil
got['speed'] = "#{speed} km/h"
else
got['speed'] = ''
end
end
if table.headers.include? 'bearing'
got['bearing'] = instructions ? bearings : ''
end
if table.headers.include? 'compass'
got['compass'] = instructions ? compasses : ''
end
if table.headers.include? 'turns'
got['turns'] = instructions ? turns : ''
end
if table.headers.include? 'modes'
got['modes'] = instructions ? modes : ''
end
if table.headers.include? 'times'
got['times'] = instructions ? times : ''
end
if table.headers.include? 'distances'
got['distances'] = instructions ? distances : ''
end
end
if table.headers.include? 'start'
got['start'] = instructions ? json['route_summary']['start_point'] : nil
end
if table.headers.include? 'end'
got['end'] = instructions ? json['route_summary']['end_point'] : nil
end
if table.headers.include? 'geometry'
got['geometry'] = json['route_geometry']
end
end
ok = true
encoded_result = ""
extended_target = ""
row['matchings'].split(',').each_with_index do |sub, sub_idx|
if sub_idx >= sub_matchings.length
ok = false
break
end
sub.length.times do |node_idx|
node = find_node_by_name(sub[node_idx])
out_node = sub_matchings[sub_idx][node_idx]
if FuzzyMatch.match_location out_node, node
encoded_result += sub[node_idx]
extended_target += sub[node_idx]
else
encoded_result += "? [#{out_node[0]},#{out_node[1]}]"
extended_target += "#{sub[node_idx]} [#{node.lat},#{node.lon}]"
ok = false
end
end
end
if ok
got['matchings'] = row['matchings']
got['timestamps'] = row['timestamps']
else
got['matchings'] = encoded_result
row['matchings'] = extended_target
log_fail row,got, { 'matching' => {:query => @query, :response => response} }
end
actual << got
end
end
table.diff! actual
end

View File

@ -97,7 +97,7 @@ When /^I route I should get$/ do |table|
raise "*** Speed must be specied in km/h. (ex: 50 km/h)" unless row['speed'] =~ /\d+ km\/h/ raise "*** Speed must be specied in km/h. (ex: 50 km/h)" unless row['speed'] =~ /\d+ km\/h/
time = json['route_summary']['total_time'] time = json['route_summary']['total_time']
distance = json['route_summary']['total_distance'] distance = json['route_summary']['total_distance']
speed = time>0 ? (3.6*distance/time).to_i : nil speed = time>0 ? (3.6*distance/time).round : nil
got['speed'] = "#{speed} km/h" got['speed'] = "#{speed} km/h"
else else
got['speed'] = '' got['speed'] = ''

View File

@ -0,0 +1,121 @@
When /^I plan a trip I should get$/ do |table|
reprocess
actual = []
OSRMLoader.load(self,"#{prepared_file}.osrm") do
table.hashes.each_with_index do |row,ri|
if row['request']
got = {'request' => row['request'] }
response = request_url row['request']
else
params = @query_params
waypoints = []
if row['from'] and row['to']
node = find_node_by_name(row['from'])
raise "*** unknown from-node '#{row['from']}" unless node
waypoints << node
node = find_node_by_name(row['to'])
raise "*** unknown to-node '#{row['to']}" unless node
waypoints << node
got = {'from' => row['from'], 'to' => row['to'] }
response = request_trip waypoints, params
elsif row['waypoints']
row['waypoints'].split(',').each do |n|
node = find_node_by_name(n.strip)
raise "*** unknown waypoint node '#{n.strip}" unless node
waypoints << node
end
got = {'waypoints' => row['waypoints'] }
response = request_trip waypoints, params
else
raise "*** no waypoints"
end
end
row.each_pair do |k,v|
if k =~ /param:(.*)/
if v=='(nil)'
params[$1]=nil
elsif v!=nil
params[$1]=v
end
got[k]=v
end
end
if response.body.empty? == false
json = JSON.parse response.body
end
if table.headers.include? 'status'
got['status'] = json['status'].to_s
end
if table.headers.include? 'message'
got['message'] = json['status_message']
end
if table.headers.include? '#' # comment column
got['#'] = row['#'] # copy value so it always match
end
if response.code == "200"
if table.headers.include? 'trips'
sub_trips = json['trips'].compact.map { |sub| sub['via_points']}
end
end
######################
ok = true
encoded_result = ""
extended_target = ""
row['trips'].split(',').each_with_index do |sub, sub_idx|
if sub_idx >= sub_trips.length
ok = false
break
end
ok = false;
#TODO: Check all rotations of the round trip
sub.length.times do |node_idx|
node = find_node_by_name(sub[node_idx])
out_node = sub_trips[sub_idx][node_idx]
if FuzzyMatch.match_location out_node, node
encoded_result += sub[node_idx]
extended_target += sub[node_idx]
ok = true
else
encoded_result += "? [#{out_node[0]},#{out_node[1]}]"
extended_target += "#{sub[node_idx]} [#{node.lat},#{node.lon}]"
end
end
end
if ok
got['trips'] = row['trips']
got['via_points'] = row['via_points']
else
got['trips'] = encoded_result
row['trips'] = extended_target
log_fail row,got, { 'trip' => {:query => @query, :response => response} }
end
ok = true
row.keys.each do |key|
if FuzzyMatch.match got[key], row[key]
got[key] = row[key]
else
ok = false
end
end
unless ok
log_fail row,got, { 'trip' => {:query => @query, :response => response} }
end
actual << got
end
end
table.diff! actual
end

View File

@ -296,7 +296,7 @@ def prepare_data
raise PrepareError.new $?.exitstatus, "osrm-prepare exited with code #{$?.exitstatus}." raise PrepareError.new $?.exitstatus, "osrm-prepare exited with code #{$?.exitstatus}."
end end
begin begin
["osrm.hsgr","osrm.fileIndex","osrm.geometry","osrm.nodes","osrm.ramIndex"].each do |file| ["osrm.hsgr","osrm.fileIndex","osrm.geometry","osrm.nodes","osrm.ramIndex","osrm.core"].each do |file|
File.rename "#{extracted_file}.#{file}", "#{prepared_file}.#{file}" File.rename "#{extracted_file}.#{file}", "#{prepared_file}.#{file}"
end end
rescue Exception => e rescue Exception => e

View File

@ -14,6 +14,7 @@ DEFAULT_SPEEDPROFILE = 'bicycle'
WAY_SPACING = 100 WAY_SPACING = 100
DEFAULT_GRID_SIZE = 100 #meters DEFAULT_GRID_SIZE = 100 #meters
PROFILES_PATH = File.join ROOT_FOLDER, 'profiles' PROFILES_PATH = File.join ROOT_FOLDER, 'profiles'
FIXTURES_PATH = File.join ROOT_FOLDER, 'unit_tests/fixtures'
BIN_PATH = File.join ROOT_FOLDER, 'build' BIN_PATH = File.join ROOT_FOLDER, 'build'
DEFAULT_INPUT_FORMAT = 'osm' DEFAULT_INPUT_FORMAT = 'osm'
DEFAULT_ORIGIN = [1,1] DEFAULT_ORIGIN = [1,1]

View File

@ -3,7 +3,7 @@ require 'net/http'
HOST = "http://127.0.0.1:#{OSRM_PORT}" HOST = "http://127.0.0.1:#{OSRM_PORT}"
def request_matching trace=[], timestamps=[], options={} def request_matching trace=[], timestamps=[], options={}
defaults = { 'output' => 'json' } defaults = { 'output' => 'json', 'instructions' => 'true' }
locs = trace.compact.map { |w| "loc=#{w.lat},#{w.lon}" } locs = trace.compact.map { |w| "loc=#{w.lat},#{w.lon}" }
ts = timestamps.compact.map { |t| "t=#{t}" } ts = timestamps.compact.map { |t| "t=#{t}" }
if ts.length > 0 if ts.length > 0

14
features/support/trip.rb Normal file
View File

@ -0,0 +1,14 @@
require 'net/http'
HOST = "http://127.0.0.1:#{OSRM_PORT}"
# Issues a "trip" request against the local OSRM test server and returns the
# HTTP response (via send_request).
#
# waypoints: array of node objects responding to #lat / #lon (nils are dropped)
# params:    hash of extra query parameters; merged over the defaults
def request_trip waypoints=[], params={}
  defaults = { 'output' => 'json' }
  locs = waypoints.compact.map { |w| "loc=#{w.lat},#{w.lon}" }
  # Build the query string: loc=... pairs followed by the merged params
  # (to_param is ActiveSupport's hash-to-query-string helper).
  params = (locs + defaults.merge(params).to_param).join('&')
  params = nil if params==""
  # Guard the concatenation: the original "trip" + '?' + params would raise
  # TypeError when params is nil (empty query string). With the 'output'
  # default always present this branch is latent, but keep it safe.
  uri = generate_request_url (params.nil? ? "trip" : "trip" + '?' + params)
  response = send_request uri, waypoints, params
end

View File

@ -18,5 +18,5 @@ Feature: Geometry Compression
When I route I should get When I route I should get
| from | to | route | distance | speed | | from | to | route | distance | speed |
| b | e | abcdef | 589m | 35 km/h | | b | e | abcdef | 589m | 36 km/h |
| e | b | abcdef | 589m | 35 km/h | | e | b | abcdef | 589m | 36 km/h |

View File

@ -40,6 +40,7 @@ Feature: Basic Map Matching
| afcde | abcde | | afcde | abcde |
Scenario: Testbot - Map matching with oneways Scenario: Testbot - Map matching with oneways
Given a grid size of 10 meters
Given the node map Given the node map
| a | b | c | d | | a | b | c | d |
| e | f | g | h | | e | f | g | h |

View File

@ -0,0 +1,82 @@
@routing @turns @testbot
Feature: Turn directions/codes
Background:
Given the profile "testbot"
Scenario: Turn directions
Given the node map
| o | p | a | b | c |
| n | | | | d |
| m | | x | | e |
| l | | | | f |
| k | j | i | h | g |
And the ways
| nodes |
| xa |
| xb |
| xc |
| xd |
| xe |
| xf |
| xg |
| xh |
| xi |
| xj |
| xk |
| xl |
| xm |
| xn |
| xo |
| xp |
When I match with turns I should get
| trace | route | turns | matchings |
| im | xi,xm | head,left,destination | im |
| io | xi,xo | head,slight_left,destination | io |
| ia | xi,xa | head,straight,destination | ia |
| ic | xi,xc | head,slight_right,destination | ic |
| ie | xi,xe | head,right,destination | ie |
| ko | xk,xo | head,left,destination | ko |
| ka | xk,xa | head,slight_left,destination | ka |
| kc | xk,xc | head,straight,destination | kc |
| ke | xk,xe | head,slight_right,destination | ke |
| kg | xk,xg | head,right,destination | kg |
| ma | xm,xa | head,left,destination | ma |
| mc | xm,xc | head,slight_left,destination | mc |
| me | xm,xe | head,straight,destination | me |
| mg | xm,xg | head,slight_right,destination | mg |
| mi | xm,xi | head,right,destination | mi |
| oc | xo,xc | head,left,destination | oc |
| oe | xo,xe | head,slight_left,destination | oe |
| og | xo,xg | head,straight,destination | og |
| oi | xo,xi | head,slight_right,destination | oi |
| ok | xo,xk | head,right,destination | ok |
| ae | xa,xe | head,left,destination | ae |
| ag | xa,xg | head,slight_left,destination | ag |
| ai | xa,xi | head,straight,destination | ai |
| ak | xa,xk | head,slight_right,destination | ak |
| am | xa,xm | head,right,destination | am |
| cg | xc,xg | head,left,destination | cg |
| ci | xc,xi | head,slight_left,destination | ci |
| ck | xc,xk | head,straight,destination | ck |
| cm | xc,xm | head,slight_right,destination | cm |
| co | xc,xo | head,right,destination | co |
| ei | xe,xi | head,left,destination | ei |
| ek | xe,xk | head,slight_left,destination | ek |
| em | xe,xm | head,straight,destination | em |
| eo | xe,xo | head,slight_right,destination | eo |
| ea | xe,xa | head,right,destination | ea |
| gk | xg,xk | head,left,destination | gk |
| gm | xg,xm | head,slight_left,destination | gm |
| go | xg,xo | head,straight,destination | go |
| ga | xg,xa | head,slight_right,destination | ga |
| gc | xg,xc | head,right,destination | gc |

View File

@ -12,6 +12,20 @@ Feature: Testbot - Travel mode
Background: Background:
Given the profile "testbot" Given the profile "testbot"
Scenario: Testbot - Always announce mode change
Given the node map
| a | b | c | d |
And the ways
| nodes | highway | name |
| ab | residential | foo |
| bc | river | foo |
| cd | residential | foo |
When I route I should get
| from | to | route | modes |
| a | d | foo,foo,foo | 1,3,1 |
Scenario: Testbot - Modes in each direction, different forward/backward speeds Scenario: Testbot - Modes in each direction, different forward/backward speeds
Given the node map Given the node map
| | 0 | 1 | | | | 0 | 1 | |

View File

@ -30,6 +30,7 @@ Feature: POST request
| z | x | yz,xy | head,left,destination | | z | x | yz,xy | head,left,destination |
Scenario: Testbot - match POST request Scenario: Testbot - match POST request
Given a grid size of 10 meters
Given the node map Given the node map
| a | b | c | d | | a | b | c | d |
| e | f | g | h | | e | f | g | h |

View File

@ -0,0 +1,86 @@
@trip @testbot
Feature: Basic trip planning
Background:
Given the profile "testbot"
Given a grid size of 10 meters
Scenario: Testbot - Trip planning with less than 10 nodes
Given the node map
| a | b |
| d | c |
And the ways
| nodes |
| ab |
| bc |
| cb |
| da |
When I plan a trip I should get
| waypoints | trips |
| a,b,c,d | dcba |
Scenario: Testbot - Trip planning with more than 10 nodes
Given the node map
| a | b | c | d |
| l | | | e |
| k | | | f |
| j | i | h | g |
And the ways
| nodes |
| ab |
| bc |
| cb |
| de |
| ef |
| fg |
| gh |
| hi |
| ij |
| jk |
| kl |
| la |
When I plan a trip I should get
| waypoints | trips |
| a,b,c,d,e,f,g,h,i,j,k,l | cbalkjihgfedc |
Scenario: Testbot - Trip planning with multiple scc
Given the node map
| a | b | c | d |
| l | | | e |
| k | | | f |
| j | i | h | g |
| | | | |
| m | n | | |
| p | o | | |
And the ways
| nodes |
| ab |
| bc |
| cb |
| de |
| ef |
| fg |
| gh |
| hi |
| ij |
| jk |
| kl |
| la |
| mn |
| no |
| op |
| pm |
When I plan a trip I should get
| waypoints | trips |
| a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p | cbalkjihgfedc,ponm |

Some files were not shown because too many files have changed in this diff Show More