remove templates from routing algorithms
This commit is contained in:
parent
f2c3b9859e
commit
d129b0ef24
@ -39,8 +39,8 @@ class MatchPlugin : public BasePlugin
|
||||
|
||||
private:
|
||||
mutable SearchEngineData heaps;
|
||||
mutable routing_algorithms::MapMatching<datafacade::BaseDataFacade> map_matching;
|
||||
mutable routing_algorithms::ShortestPathRouting<datafacade::BaseDataFacade> shortest_path;
|
||||
mutable routing_algorithms::MapMatching map_matching;
|
||||
mutable routing_algorithms::ShortestPathRouting shortest_path;
|
||||
const int max_locations_map_matching;
|
||||
};
|
||||
}
|
||||
|
@ -26,7 +26,7 @@ class TablePlugin final : public BasePlugin
|
||||
|
||||
private:
|
||||
mutable SearchEngineData heaps;
|
||||
mutable routing_algorithms::ManyToManyRouting<datafacade::BaseDataFacade> distance_table;
|
||||
mutable routing_algorithms::ManyToManyRouting distance_table;
|
||||
const int max_locations_distance_table;
|
||||
};
|
||||
}
|
||||
|
@ -30,11 +30,11 @@ class TripPlugin final : public BasePlugin
|
||||
{
|
||||
private:
|
||||
mutable SearchEngineData heaps;
|
||||
mutable routing_algorithms::ShortestPathRouting<datafacade::BaseDataFacade> shortest_path;
|
||||
mutable routing_algorithms::ManyToManyRouting<datafacade::BaseDataFacade> duration_table;
|
||||
mutable routing_algorithms::ShortestPathRouting shortest_path;
|
||||
mutable routing_algorithms::ManyToManyRouting duration_table;
|
||||
const int max_locations_trip;
|
||||
|
||||
InternalRouteResult ComputeRoute(const datafacade::BaseDataFacade &facade,
|
||||
InternalRouteResult ComputeRoute(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
const std::vector<PhantomNode> &phantom_node_list,
|
||||
const std::vector<NodeID> &trip) const;
|
||||
|
||||
|
@ -29,10 +29,9 @@ class ViaRoutePlugin final : public BasePlugin
|
||||
{
|
||||
private:
|
||||
mutable SearchEngineData heaps;
|
||||
mutable routing_algorithms::ShortestPathRouting<datafacade::BaseDataFacade> shortest_path;
|
||||
mutable routing_algorithms::AlternativeRouting<datafacade::BaseDataFacade> alternative_path;
|
||||
mutable routing_algorithms::DirectShortestPathRouting<datafacade::BaseDataFacade>
|
||||
direct_shortest_path;
|
||||
mutable routing_algorithms::ShortestPathRouting shortest_path;
|
||||
mutable routing_algorithms::AlternativeRouting alternative_path;
|
||||
mutable routing_algorithms::DirectShortestPathRouting direct_shortest_path;
|
||||
const int max_locations_viaroute;
|
||||
|
||||
public:
|
||||
|
@ -1,6 +1,7 @@
|
||||
#ifndef ALTERNATIVE_PATH_ROUTING_HPP
|
||||
#define ALTERNATIVE_PATH_ROUTING_HPP
|
||||
|
||||
#include "engine/datafacade/datafacade_base.hpp"
|
||||
#include "engine/routing_algorithms/routing_base.hpp"
|
||||
#include "engine/search_engine_data.hpp"
|
||||
#include "util/integer_range.hpp"
|
||||
@ -9,6 +10,7 @@
|
||||
|
||||
#include <algorithm>
|
||||
#include <iterator>
|
||||
#include <memory>
|
||||
#include <unordered_map>
|
||||
#include <unordered_set>
|
||||
|
||||
@ -21,15 +23,13 @@ namespace engine
|
||||
namespace routing_algorithms
|
||||
{
|
||||
|
||||
const double VIAPATH_ALPHA = 0.10;
|
||||
const double VIAPATH_EPSILON = 0.15; // alternative at most 15% longer
|
||||
const double VIAPATH_GAMMA = 0.75; // alternative shares at most 75% with the shortest.
|
||||
const double constexpr VIAPATH_ALPHA = 0.10;
|
||||
const double constexpr VIAPATH_EPSILON = 0.15; // alternative at most 15% longer
|
||||
const double constexpr VIAPATH_GAMMA = 0.75; // alternative shares at most 75% with the shortest.
|
||||
|
||||
template <class DataFacadeT>
|
||||
class AlternativeRouting final : private BasicRoutingInterface<DataFacadeT>
|
||||
class AlternativeRouting final : private BasicRoutingInterface
|
||||
{
|
||||
using super = BasicRoutingInterface<DataFacadeT>;
|
||||
using EdgeData = typename DataFacadeT::EdgeData;
|
||||
using super = BasicRoutingInterface;
|
||||
using QueryHeap = SearchEngineData::QueryHeap;
|
||||
using SearchSpaceEdge = std::pair<NodeID, NodeID>;
|
||||
|
||||
@ -59,326 +59,9 @@ class AlternativeRouting final : private BasicRoutingInterface<DataFacadeT>
|
||||
|
||||
virtual ~AlternativeRouting() {}
|
||||
|
||||
void operator()(const DataFacadeT &facade,
|
||||
void operator()(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
const PhantomNodes &phantom_node_pair,
|
||||
InternalRouteResult &raw_route_data)
|
||||
{
|
||||
std::vector<NodeID> alternative_path;
|
||||
std::vector<NodeID> via_node_candidate_list;
|
||||
std::vector<SearchSpaceEdge> forward_search_space;
|
||||
std::vector<SearchSpaceEdge> reverse_search_space;
|
||||
|
||||
// Init queues, semi-expensive because access to TSS invokes a sys-call
|
||||
engine_working_data.InitializeOrClearFirstThreadLocalStorage(facade.GetNumberOfNodes());
|
||||
engine_working_data.InitializeOrClearSecondThreadLocalStorage(facade.GetNumberOfNodes());
|
||||
engine_working_data.InitializeOrClearThirdThreadLocalStorage(facade.GetNumberOfNodes());
|
||||
|
||||
QueryHeap &forward_heap1 = *(engine_working_data.forward_heap_1);
|
||||
QueryHeap &reverse_heap1 = *(engine_working_data.reverse_heap_1);
|
||||
QueryHeap &forward_heap2 = *(engine_working_data.forward_heap_2);
|
||||
QueryHeap &reverse_heap2 = *(engine_working_data.reverse_heap_2);
|
||||
|
||||
int upper_bound_to_shortest_path_weight = INVALID_EDGE_WEIGHT;
|
||||
NodeID middle_node = SPECIAL_NODEID;
|
||||
const EdgeWeight min_edge_offset =
|
||||
std::min(phantom_node_pair.source_phantom.forward_segment_id.enabled
|
||||
? -phantom_node_pair.source_phantom.GetForwardWeightPlusOffset()
|
||||
: 0,
|
||||
phantom_node_pair.source_phantom.reverse_segment_id.enabled
|
||||
? -phantom_node_pair.source_phantom.GetReverseWeightPlusOffset()
|
||||
: 0);
|
||||
|
||||
if (phantom_node_pair.source_phantom.forward_segment_id.enabled)
|
||||
{
|
||||
BOOST_ASSERT(phantom_node_pair.source_phantom.forward_segment_id.id !=
|
||||
SPECIAL_SEGMENTID);
|
||||
forward_heap1.Insert(phantom_node_pair.source_phantom.forward_segment_id.id,
|
||||
-phantom_node_pair.source_phantom.GetForwardWeightPlusOffset(),
|
||||
phantom_node_pair.source_phantom.forward_segment_id.id);
|
||||
}
|
||||
if (phantom_node_pair.source_phantom.reverse_segment_id.enabled)
|
||||
{
|
||||
BOOST_ASSERT(phantom_node_pair.source_phantom.reverse_segment_id.id !=
|
||||
SPECIAL_SEGMENTID);
|
||||
forward_heap1.Insert(phantom_node_pair.source_phantom.reverse_segment_id.id,
|
||||
-phantom_node_pair.source_phantom.GetReverseWeightPlusOffset(),
|
||||
phantom_node_pair.source_phantom.reverse_segment_id.id);
|
||||
}
|
||||
|
||||
if (phantom_node_pair.target_phantom.forward_segment_id.enabled)
|
||||
{
|
||||
BOOST_ASSERT(phantom_node_pair.target_phantom.forward_segment_id.id !=
|
||||
SPECIAL_SEGMENTID);
|
||||
reverse_heap1.Insert(phantom_node_pair.target_phantom.forward_segment_id.id,
|
||||
phantom_node_pair.target_phantom.GetForwardWeightPlusOffset(),
|
||||
phantom_node_pair.target_phantom.forward_segment_id.id);
|
||||
}
|
||||
if (phantom_node_pair.target_phantom.reverse_segment_id.enabled)
|
||||
{
|
||||
BOOST_ASSERT(phantom_node_pair.target_phantom.reverse_segment_id.id !=
|
||||
SPECIAL_SEGMENTID);
|
||||
reverse_heap1.Insert(phantom_node_pair.target_phantom.reverse_segment_id.id,
|
||||
phantom_node_pair.target_phantom.GetReverseWeightPlusOffset(),
|
||||
phantom_node_pair.target_phantom.reverse_segment_id.id);
|
||||
}
|
||||
|
||||
// search from s and t till new_min/(1+epsilon) > length_of_shortest_path
|
||||
while (0 < (forward_heap1.Size() + reverse_heap1.Size()))
|
||||
{
|
||||
if (0 < forward_heap1.Size())
|
||||
{
|
||||
AlternativeRoutingStep<true>(facade,
|
||||
forward_heap1,
|
||||
reverse_heap1,
|
||||
&middle_node,
|
||||
&upper_bound_to_shortest_path_weight,
|
||||
via_node_candidate_list,
|
||||
forward_search_space,
|
||||
min_edge_offset);
|
||||
}
|
||||
if (0 < reverse_heap1.Size())
|
||||
{
|
||||
AlternativeRoutingStep<false>(facade,
|
||||
forward_heap1,
|
||||
reverse_heap1,
|
||||
&middle_node,
|
||||
&upper_bound_to_shortest_path_weight,
|
||||
via_node_candidate_list,
|
||||
reverse_search_space,
|
||||
min_edge_offset);
|
||||
}
|
||||
}
|
||||
|
||||
if (INVALID_EDGE_WEIGHT == upper_bound_to_shortest_path_weight)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
std::sort(begin(via_node_candidate_list), end(via_node_candidate_list));
|
||||
auto unique_end = std::unique(begin(via_node_candidate_list), end(via_node_candidate_list));
|
||||
via_node_candidate_list.resize(unique_end - begin(via_node_candidate_list));
|
||||
|
||||
std::vector<NodeID> packed_forward_path;
|
||||
std::vector<NodeID> packed_reverse_path;
|
||||
|
||||
const bool path_is_a_loop =
|
||||
upper_bound_to_shortest_path_weight !=
|
||||
forward_heap1.GetKey(middle_node) + reverse_heap1.GetKey(middle_node);
|
||||
if (path_is_a_loop)
|
||||
{
|
||||
// Self Loop
|
||||
packed_forward_path.push_back(middle_node);
|
||||
packed_forward_path.push_back(middle_node);
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
super::RetrievePackedPathFromSingleHeap(
|
||||
forward_heap1, middle_node, packed_forward_path);
|
||||
super::RetrievePackedPathFromSingleHeap(
|
||||
reverse_heap1, middle_node, packed_reverse_path);
|
||||
}
|
||||
|
||||
// this set is is used as an indicator if a node is on the shortest path
|
||||
std::unordered_set<NodeID> nodes_in_path(packed_forward_path.size() +
|
||||
packed_reverse_path.size());
|
||||
nodes_in_path.insert(packed_forward_path.begin(), packed_forward_path.end());
|
||||
nodes_in_path.insert(middle_node);
|
||||
nodes_in_path.insert(packed_reverse_path.begin(), packed_reverse_path.end());
|
||||
|
||||
std::unordered_map<NodeID, int> approximated_forward_sharing;
|
||||
std::unordered_map<NodeID, int> approximated_reverse_sharing;
|
||||
|
||||
// sweep over search space, compute forward sharing for each current edge (u,v)
|
||||
for (const SearchSpaceEdge ¤t_edge : forward_search_space)
|
||||
{
|
||||
const NodeID u = current_edge.first;
|
||||
const NodeID v = current_edge.second;
|
||||
|
||||
if (nodes_in_path.find(v) != nodes_in_path.end())
|
||||
{
|
||||
// current_edge is on shortest path => sharing(v):=queue.GetKey(v);
|
||||
approximated_forward_sharing.emplace(v, forward_heap1.GetKey(v));
|
||||
}
|
||||
else
|
||||
{
|
||||
// current edge is not on shortest path. Check if we know a value for the other
|
||||
// endpoint
|
||||
const auto sharing_of_u_iterator = approximated_forward_sharing.find(u);
|
||||
if (sharing_of_u_iterator != approximated_forward_sharing.end())
|
||||
{
|
||||
approximated_forward_sharing.emplace(v, sharing_of_u_iterator->second);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sweep over search space, compute backward sharing
|
||||
for (const SearchSpaceEdge ¤t_edge : reverse_search_space)
|
||||
{
|
||||
const NodeID u = current_edge.first;
|
||||
const NodeID v = current_edge.second;
|
||||
if (nodes_in_path.find(v) != nodes_in_path.end())
|
||||
{
|
||||
// current_edge is on shortest path => sharing(u):=queue.GetKey(u);
|
||||
approximated_reverse_sharing.emplace(v, reverse_heap1.GetKey(v));
|
||||
}
|
||||
else
|
||||
{
|
||||
// current edge is not on shortest path. Check if we know a value for the other
|
||||
// endpoint
|
||||
const auto sharing_of_u_iterator = approximated_reverse_sharing.find(u);
|
||||
if (sharing_of_u_iterator != approximated_reverse_sharing.end())
|
||||
{
|
||||
approximated_reverse_sharing.emplace(v, sharing_of_u_iterator->second);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// util::Log(logDEBUG) << "fwd_search_space size: " <<
|
||||
// forward_search_space.size() << ", marked " << approximated_forward_sharing.size() << "
|
||||
// nodes";
|
||||
// util::Log(logDEBUG) << "rev_search_space size: " <<
|
||||
// reverse_search_space.size() << ", marked " << approximated_reverse_sharing.size() << "
|
||||
// nodes";
|
||||
|
||||
std::vector<NodeID> preselected_node_list;
|
||||
for (const NodeID node : via_node_candidate_list)
|
||||
{
|
||||
if (node == middle_node)
|
||||
continue;
|
||||
const auto fwd_iterator = approximated_forward_sharing.find(node);
|
||||
const int fwd_sharing =
|
||||
(fwd_iterator != approximated_forward_sharing.end()) ? fwd_iterator->second : 0;
|
||||
const auto rev_iterator = approximated_reverse_sharing.find(node);
|
||||
const int rev_sharing =
|
||||
(rev_iterator != approximated_reverse_sharing.end()) ? rev_iterator->second : 0;
|
||||
|
||||
const int approximated_sharing = fwd_sharing + rev_sharing;
|
||||
const int approximated_length = forward_heap1.GetKey(node) + reverse_heap1.GetKey(node);
|
||||
const bool length_passes =
|
||||
(approximated_length < upper_bound_to_shortest_path_weight * (1 + VIAPATH_EPSILON));
|
||||
const bool sharing_passes =
|
||||
(approximated_sharing <= upper_bound_to_shortest_path_weight * VIAPATH_GAMMA);
|
||||
const bool stretch_passes =
|
||||
(approximated_length - approximated_sharing) <
|
||||
((1. + VIAPATH_ALPHA) *
|
||||
(upper_bound_to_shortest_path_weight - approximated_sharing));
|
||||
|
||||
if (length_passes && sharing_passes && stretch_passes)
|
||||
{
|
||||
preselected_node_list.emplace_back(node);
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<NodeID> &packed_shortest_path = packed_forward_path;
|
||||
if (!path_is_a_loop)
|
||||
{
|
||||
std::reverse(packed_shortest_path.begin(), packed_shortest_path.end());
|
||||
packed_shortest_path.emplace_back(middle_node);
|
||||
packed_shortest_path.insert(
|
||||
packed_shortest_path.end(), packed_reverse_path.begin(), packed_reverse_path.end());
|
||||
}
|
||||
std::vector<RankedCandidateNode> ranked_candidates_list;
|
||||
|
||||
// prioritizing via nodes for deep inspection
|
||||
for (const NodeID node : preselected_node_list)
|
||||
{
|
||||
int length_of_via_path = 0, sharing_of_via_path = 0;
|
||||
ComputeLengthAndSharingOfViaPath(facade,
|
||||
node,
|
||||
&length_of_via_path,
|
||||
&sharing_of_via_path,
|
||||
packed_shortest_path,
|
||||
min_edge_offset);
|
||||
const int maximum_allowed_sharing =
|
||||
static_cast<int>(upper_bound_to_shortest_path_weight * VIAPATH_GAMMA);
|
||||
if (sharing_of_via_path <= maximum_allowed_sharing &&
|
||||
length_of_via_path <= upper_bound_to_shortest_path_weight * (1 + VIAPATH_EPSILON))
|
||||
{
|
||||
ranked_candidates_list.emplace_back(node, length_of_via_path, sharing_of_via_path);
|
||||
}
|
||||
}
|
||||
std::sort(ranked_candidates_list.begin(), ranked_candidates_list.end());
|
||||
|
||||
NodeID selected_via_node = SPECIAL_NODEID;
|
||||
int length_of_via_path = INVALID_EDGE_WEIGHT;
|
||||
NodeID s_v_middle = SPECIAL_NODEID, v_t_middle = SPECIAL_NODEID;
|
||||
for (const RankedCandidateNode &candidate : ranked_candidates_list)
|
||||
{
|
||||
if (ViaNodeCandidatePassesTTest(facade,
|
||||
forward_heap1,
|
||||
reverse_heap1,
|
||||
forward_heap2,
|
||||
reverse_heap2,
|
||||
candidate,
|
||||
upper_bound_to_shortest_path_weight,
|
||||
&length_of_via_path,
|
||||
&s_v_middle,
|
||||
&v_t_middle,
|
||||
min_edge_offset))
|
||||
{
|
||||
// select first admissable
|
||||
selected_via_node = candidate.node;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Unpack shortest path and alternative, if they exist
|
||||
if (INVALID_EDGE_WEIGHT != upper_bound_to_shortest_path_weight)
|
||||
{
|
||||
BOOST_ASSERT(!packed_shortest_path.empty());
|
||||
raw_route_data.unpacked_path_segments.resize(1);
|
||||
raw_route_data.source_traversed_in_reverse.push_back(
|
||||
(packed_shortest_path.front() !=
|
||||
phantom_node_pair.source_phantom.forward_segment_id.id));
|
||||
raw_route_data.target_traversed_in_reverse.push_back(
|
||||
(packed_shortest_path.back() !=
|
||||
phantom_node_pair.target_phantom.forward_segment_id.id));
|
||||
|
||||
super::UnpackPath(facade,
|
||||
// -- packed input
|
||||
packed_shortest_path.begin(),
|
||||
packed_shortest_path.end(),
|
||||
// -- start of route
|
||||
phantom_node_pair,
|
||||
// -- unpacked output
|
||||
raw_route_data.unpacked_path_segments.front());
|
||||
raw_route_data.shortest_path_length = upper_bound_to_shortest_path_weight;
|
||||
}
|
||||
|
||||
if (SPECIAL_NODEID != selected_via_node)
|
||||
{
|
||||
std::vector<NodeID> packed_alternate_path;
|
||||
// retrieve alternate path
|
||||
RetrievePackedAlternatePath(forward_heap1,
|
||||
reverse_heap1,
|
||||
forward_heap2,
|
||||
reverse_heap2,
|
||||
s_v_middle,
|
||||
v_t_middle,
|
||||
packed_alternate_path);
|
||||
|
||||
raw_route_data.alt_source_traversed_in_reverse.push_back(
|
||||
(packed_alternate_path.front() !=
|
||||
phantom_node_pair.source_phantom.forward_segment_id.id));
|
||||
raw_route_data.alt_target_traversed_in_reverse.push_back(
|
||||
(packed_alternate_path.back() !=
|
||||
phantom_node_pair.target_phantom.forward_segment_id.id));
|
||||
|
||||
// unpack the alternate path
|
||||
super::UnpackPath(facade,
|
||||
packed_alternate_path.begin(),
|
||||
packed_alternate_path.end(),
|
||||
phantom_node_pair,
|
||||
raw_route_data.unpacked_alternative);
|
||||
|
||||
raw_route_data.alternative_path_length = length_of_via_path;
|
||||
}
|
||||
else
|
||||
{
|
||||
BOOST_ASSERT(raw_route_data.alternative_path_length == INVALID_EDGE_WEIGHT);
|
||||
}
|
||||
}
|
||||
InternalRouteResult &raw_route_data);
|
||||
|
||||
private:
|
||||
// unpack alternate <s,..,v,..,t> by exploring search spaces from v
|
||||
@ -388,243 +71,23 @@ class AlternativeRouting final : private BasicRoutingInterface<DataFacadeT>
|
||||
const QueryHeap &reverse_heap2,
|
||||
const NodeID s_v_middle,
|
||||
const NodeID v_t_middle,
|
||||
std::vector<NodeID> &packed_path) const
|
||||
{
|
||||
// fetch packed path [s,v)
|
||||
std::vector<NodeID> packed_v_t_path;
|
||||
super::RetrievePackedPathFromHeap(forward_heap1, reverse_heap2, s_v_middle, packed_path);
|
||||
packed_path.pop_back(); // remove middle node. It's in both half-paths
|
||||
|
||||
// fetch patched path [v,t]
|
||||
super::RetrievePackedPathFromHeap(
|
||||
forward_heap2, reverse_heap1, v_t_middle, packed_v_t_path);
|
||||
|
||||
packed_path.insert(packed_path.end(), packed_v_t_path.begin(), packed_v_t_path.end());
|
||||
}
|
||||
std::vector<NodeID> &packed_path) const;
|
||||
|
||||
// TODO: reorder parameters
|
||||
// compute and unpack <s,..,v> and <v,..,t> by exploring search spaces
|
||||
// from v and intersecting against queues. only half-searches have to be
|
||||
// done at this stage
|
||||
void ComputeLengthAndSharingOfViaPath(const DataFacadeT &facade,
|
||||
const NodeID via_node,
|
||||
int *real_length_of_via_path,
|
||||
int *sharing_of_via_path,
|
||||
const std::vector<NodeID> &packed_shortest_path,
|
||||
const EdgeWeight min_edge_offset)
|
||||
{
|
||||
engine_working_data.InitializeOrClearSecondThreadLocalStorage(facade.GetNumberOfNodes());
|
||||
|
||||
QueryHeap &existing_forward_heap = *engine_working_data.forward_heap_1;
|
||||
QueryHeap &existing_reverse_heap = *engine_working_data.reverse_heap_1;
|
||||
QueryHeap &new_forward_heap = *engine_working_data.forward_heap_2;
|
||||
QueryHeap &new_reverse_heap = *engine_working_data.reverse_heap_2;
|
||||
|
||||
std::vector<NodeID> packed_s_v_path;
|
||||
std::vector<NodeID> packed_v_t_path;
|
||||
|
||||
std::vector<NodeID> partially_unpacked_shortest_path;
|
||||
std::vector<NodeID> partially_unpacked_via_path;
|
||||
|
||||
NodeID s_v_middle = SPECIAL_NODEID;
|
||||
int upper_bound_s_v_path_length = INVALID_EDGE_WEIGHT;
|
||||
new_reverse_heap.Insert(via_node, 0, via_node);
|
||||
// compute path <s,..,v> by reusing forward search from s
|
||||
const bool constexpr STALLING_ENABLED = true;
|
||||
const bool constexpr DO_NOT_FORCE_LOOPS = false;
|
||||
while (!new_reverse_heap.Empty())
|
||||
{
|
||||
super::RoutingStep(facade,
|
||||
new_reverse_heap,
|
||||
existing_forward_heap,
|
||||
s_v_middle,
|
||||
upper_bound_s_v_path_length,
|
||||
min_edge_offset,
|
||||
false,
|
||||
STALLING_ENABLED,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS);
|
||||
}
|
||||
// compute path <v,..,t> by reusing backward search from node t
|
||||
NodeID v_t_middle = SPECIAL_NODEID;
|
||||
int upper_bound_of_v_t_path_length = INVALID_EDGE_WEIGHT;
|
||||
new_forward_heap.Insert(via_node, 0, via_node);
|
||||
while (!new_forward_heap.Empty())
|
||||
{
|
||||
super::RoutingStep(facade,
|
||||
new_forward_heap,
|
||||
existing_reverse_heap,
|
||||
v_t_middle,
|
||||
upper_bound_of_v_t_path_length,
|
||||
min_edge_offset,
|
||||
true,
|
||||
STALLING_ENABLED,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS);
|
||||
}
|
||||
*real_length_of_via_path = upper_bound_s_v_path_length + upper_bound_of_v_t_path_length;
|
||||
|
||||
if (SPECIAL_NODEID == s_v_middle || SPECIAL_NODEID == v_t_middle)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
// retrieve packed paths
|
||||
super::RetrievePackedPathFromHeap(
|
||||
existing_forward_heap, new_reverse_heap, s_v_middle, packed_s_v_path);
|
||||
super::RetrievePackedPathFromHeap(
|
||||
new_forward_heap, existing_reverse_heap, v_t_middle, packed_v_t_path);
|
||||
|
||||
// partial unpacking, compute sharing
|
||||
// First partially unpack s-->v until paths deviate, note length of common path.
|
||||
const auto s_v_min_path_size =
|
||||
std::min(packed_s_v_path.size(), packed_shortest_path.size()) - 1;
|
||||
for (const auto current_node : util::irange<std::size_t>(0UL, s_v_min_path_size))
|
||||
{
|
||||
if (packed_s_v_path[current_node] == packed_shortest_path[current_node] &&
|
||||
packed_s_v_path[current_node + 1] == packed_shortest_path[current_node + 1])
|
||||
{
|
||||
EdgeID edgeID = facade.FindEdgeInEitherDirection(packed_s_v_path[current_node],
|
||||
packed_s_v_path[current_node + 1]);
|
||||
*sharing_of_via_path += facade.GetEdgeData(edgeID).weight;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (packed_s_v_path[current_node] == packed_shortest_path[current_node])
|
||||
{
|
||||
super::UnpackEdge(facade,
|
||||
packed_s_v_path[current_node],
|
||||
packed_s_v_path[current_node + 1],
|
||||
partially_unpacked_via_path);
|
||||
super::UnpackEdge(facade,
|
||||
packed_shortest_path[current_node],
|
||||
packed_shortest_path[current_node + 1],
|
||||
partially_unpacked_shortest_path);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
// traverse partially unpacked edge and note common prefix
|
||||
const int64_t packed_path_length =
|
||||
static_cast<int64_t>(std::min(partially_unpacked_via_path.size(),
|
||||
partially_unpacked_shortest_path.size())) -
|
||||
1;
|
||||
for (int64_t current_node = 0; (current_node < packed_path_length) &&
|
||||
(partially_unpacked_via_path[current_node] ==
|
||||
partially_unpacked_shortest_path[current_node] &&
|
||||
partially_unpacked_via_path[current_node + 1] ==
|
||||
partially_unpacked_shortest_path[current_node + 1]);
|
||||
++current_node)
|
||||
{
|
||||
EdgeID selected_edge =
|
||||
facade.FindEdgeInEitherDirection(partially_unpacked_via_path[current_node],
|
||||
partially_unpacked_via_path[current_node + 1]);
|
||||
*sharing_of_via_path += facade.GetEdgeData(selected_edge).weight;
|
||||
}
|
||||
|
||||
// Second, partially unpack v-->t in reverse order until paths deviate and note lengths
|
||||
int64_t via_path_index = static_cast<int64_t>(packed_v_t_path.size()) - 1;
|
||||
int64_t shortest_path_index = static_cast<int64_t>(packed_shortest_path.size()) - 1;
|
||||
for (; via_path_index > 0 && shortest_path_index > 0;
|
||||
--via_path_index, --shortest_path_index)
|
||||
{
|
||||
if (packed_v_t_path[via_path_index - 1] ==
|
||||
packed_shortest_path[shortest_path_index - 1] &&
|
||||
packed_v_t_path[via_path_index] == packed_shortest_path[shortest_path_index])
|
||||
{
|
||||
EdgeID edgeID = facade.FindEdgeInEitherDirection(
|
||||
packed_v_t_path[via_path_index - 1], packed_v_t_path[via_path_index]);
|
||||
*sharing_of_via_path += facade.GetEdgeData(edgeID).weight;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (packed_v_t_path[via_path_index] == packed_shortest_path[shortest_path_index])
|
||||
{
|
||||
super::UnpackEdge(facade,
|
||||
packed_v_t_path[via_path_index - 1],
|
||||
packed_v_t_path[via_path_index],
|
||||
partially_unpacked_via_path);
|
||||
super::UnpackEdge(facade,
|
||||
packed_shortest_path[shortest_path_index - 1],
|
||||
packed_shortest_path[shortest_path_index],
|
||||
partially_unpacked_shortest_path);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
via_path_index = static_cast<int64_t>(partially_unpacked_via_path.size()) - 1;
|
||||
shortest_path_index = static_cast<int64_t>(partially_unpacked_shortest_path.size()) - 1;
|
||||
for (; via_path_index > 0 && shortest_path_index > 0;
|
||||
--via_path_index, --shortest_path_index)
|
||||
{
|
||||
if (partially_unpacked_via_path[via_path_index - 1] ==
|
||||
partially_unpacked_shortest_path[shortest_path_index - 1] &&
|
||||
partially_unpacked_via_path[via_path_index] ==
|
||||
partially_unpacked_shortest_path[shortest_path_index])
|
||||
{
|
||||
EdgeID edgeID = facade.FindEdgeInEitherDirection(
|
||||
partially_unpacked_via_path[via_path_index - 1],
|
||||
partially_unpacked_via_path[via_path_index]);
|
||||
*sharing_of_via_path += facade.GetEdgeData(edgeID).weight;
|
||||
}
|
||||
else
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
// finished partial unpacking spree! Amount of sharing is stored to appropriate pointer
|
||||
// variable
|
||||
}
|
||||
|
||||
// int approximateAmountOfSharing(
|
||||
// const NodeID alternate_path_middle_node_id,
|
||||
// QueryHeap & forward_heap,
|
||||
// QueryHeap & reverse_heap,
|
||||
// const std::vector<NodeID> & packed_shortest_path
|
||||
// ) const {
|
||||
// std::vector<NodeID> packed_alternate_path;
|
||||
// super::RetrievePackedPathFromHeap(
|
||||
// forward_heap,
|
||||
// reverse_heap,
|
||||
// alternate_path_middle_node_id,
|
||||
// packed_alternate_path
|
||||
// );
|
||||
|
||||
// if(packed_shortest_path.size() < 2 || packed_alternate_path.size() < 2) {
|
||||
// return 0;
|
||||
// }
|
||||
|
||||
// int sharing = 0;
|
||||
// int aindex = 0;
|
||||
// //compute forward sharing
|
||||
// while( (packed_alternate_path[aindex] == packed_shortest_path[aindex]) &&
|
||||
// (packed_alternate_path[aindex+1] == packed_shortest_path[aindex+1]) ) {
|
||||
// // util::Log() << "retrieving edge (" <<
|
||||
// packed_alternate_path[aindex] << "," << packed_alternate_path[aindex+1] << ")";
|
||||
// EdgeID edgeID = facade->FindEdgeInEitherDirection(packed_alternate_path[aindex],
|
||||
// packed_alternate_path[aindex+1]);
|
||||
// sharing += facade->GetEdgeData(edgeID).weight;
|
||||
// ++aindex;
|
||||
// }
|
||||
|
||||
// aindex = packed_alternate_path.size()-1;
|
||||
// int bindex = packed_shortest_path.size()-1;
|
||||
// //compute backward sharing
|
||||
// while( aindex > 0 && bindex > 0 && (packed_alternate_path[aindex] ==
|
||||
// packed_shortest_path[bindex]) && (packed_alternate_path[aindex-1] ==
|
||||
// packed_shortest_path[bindex-1]) ) {
|
||||
// EdgeID edgeID = facade->FindEdgeInEitherDirection(packed_alternate_path[aindex],
|
||||
// packed_alternate_path[aindex-1]);
|
||||
// sharing += facade->GetEdgeData(edgeID).weight;
|
||||
// --aindex; --bindex;
|
||||
// }
|
||||
// return sharing;
|
||||
// }
|
||||
void
|
||||
ComputeLengthAndSharingOfViaPath(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
const NodeID via_node,
|
||||
int *real_length_of_via_path,
|
||||
int *sharing_of_via_path,
|
||||
const std::vector<NodeID> &packed_shortest_path,
|
||||
const EdgeWeight min_edge_offset);
|
||||
|
||||
// todo: reorder parameters
|
||||
template <bool is_forward_directed>
|
||||
void AlternativeRoutingStep(const DataFacadeT &facade,
|
||||
void AlternativeRoutingStep(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
QueryHeap &heap1,
|
||||
QueryHeap &heap2,
|
||||
NodeID *middle_node,
|
||||
@ -687,14 +150,14 @@ class AlternativeRouting final : private BasicRoutingInterface<DataFacadeT>
|
||||
}
|
||||
}
|
||||
|
||||
for (auto edge : facade.GetAdjacentEdgeRange(node))
|
||||
for (auto edge : facade->GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const EdgeData &data = facade.GetEdgeData(edge);
|
||||
const EdgeData &data = facade->GetEdgeData(edge);
|
||||
const bool edge_is_forward_directed =
|
||||
(is_forward_directed ? data.forward : data.backward);
|
||||
if (edge_is_forward_directed)
|
||||
{
|
||||
const NodeID to = facade.GetTarget(edge);
|
||||
const NodeID to = facade->GetTarget(edge);
|
||||
const int edge_weight = data.weight;
|
||||
|
||||
BOOST_ASSERT(edge_weight > 0);
|
||||
@ -718,7 +181,7 @@ class AlternativeRouting final : private BasicRoutingInterface<DataFacadeT>
|
||||
}
|
||||
|
||||
// conduct T-Test
|
||||
bool ViaNodeCandidatePassesTTest(const DataFacadeT &facade,
|
||||
bool ViaNodeCandidatePassesTTest(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
QueryHeap &existing_forward_heap,
|
||||
QueryHeap &existing_reverse_heap,
|
||||
QueryHeap &new_forward_heap,
|
||||
@ -728,249 +191,11 @@ class AlternativeRouting final : private BasicRoutingInterface<DataFacadeT>
|
||||
int *length_of_via_path,
|
||||
NodeID *s_v_middle,
|
||||
NodeID *v_t_middle,
|
||||
const EdgeWeight min_edge_offset) const
|
||||
{
|
||||
new_forward_heap.Clear();
|
||||
new_reverse_heap.Clear();
|
||||
std::vector<NodeID> packed_s_v_path;
|
||||
std::vector<NodeID> packed_v_t_path;
|
||||
|
||||
*s_v_middle = SPECIAL_NODEID;
|
||||
int upper_bound_s_v_path_length = INVALID_EDGE_WEIGHT;
|
||||
// compute path <s,..,v> by reusing forward search from s
|
||||
new_reverse_heap.Insert(candidate.node, 0, candidate.node);
|
||||
const bool constexpr STALLING_ENABLED = true;
|
||||
const bool constexpr DO_NOT_FORCE_LOOPS = false;
|
||||
while (new_reverse_heap.Size() > 0)
|
||||
{
|
||||
super::RoutingStep(facade,
|
||||
new_reverse_heap,
|
||||
existing_forward_heap,
|
||||
*s_v_middle,
|
||||
upper_bound_s_v_path_length,
|
||||
min_edge_offset,
|
||||
false,
|
||||
STALLING_ENABLED,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS);
|
||||
}
|
||||
|
||||
if (INVALID_EDGE_WEIGHT == upper_bound_s_v_path_length)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
// compute path <v,..,t> by reusing backward search from t
|
||||
*v_t_middle = SPECIAL_NODEID;
|
||||
int upper_bound_of_v_t_path_length = INVALID_EDGE_WEIGHT;
|
||||
new_forward_heap.Insert(candidate.node, 0, candidate.node);
|
||||
while (new_forward_heap.Size() > 0)
|
||||
{
|
||||
super::RoutingStep(facade,
|
||||
new_forward_heap,
|
||||
existing_reverse_heap,
|
||||
*v_t_middle,
|
||||
upper_bound_of_v_t_path_length,
|
||||
min_edge_offset,
|
||||
true,
|
||||
STALLING_ENABLED,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS);
|
||||
}
|
||||
|
||||
if (INVALID_EDGE_WEIGHT == upper_bound_of_v_t_path_length)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
*length_of_via_path = upper_bound_s_v_path_length + upper_bound_of_v_t_path_length;
|
||||
|
||||
// retrieve packed paths
|
||||
super::RetrievePackedPathFromHeap(
|
||||
existing_forward_heap, new_reverse_heap, *s_v_middle, packed_s_v_path);
|
||||
|
||||
super::RetrievePackedPathFromHeap(
|
||||
new_forward_heap, existing_reverse_heap, *v_t_middle, packed_v_t_path);
|
||||
|
||||
NodeID s_P = *s_v_middle, t_P = *v_t_middle;
|
||||
if (SPECIAL_NODEID == s_P)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
if (SPECIAL_NODEID == t_P)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
const int T_threshold = static_cast<int>(VIAPATH_EPSILON * length_of_shortest_path);
|
||||
int unpacked_until_weight = 0;
|
||||
|
||||
std::stack<SearchSpaceEdge> unpack_stack;
|
||||
// Traverse path s-->v
|
||||
for (std::size_t i = packed_s_v_path.size() - 1; (i > 0) && unpack_stack.empty(); --i)
|
||||
{
|
||||
const EdgeID current_edge_id =
|
||||
facade.FindEdgeInEitherDirection(packed_s_v_path[i - 1], packed_s_v_path[i]);
|
||||
const int length_of_current_edge = facade.GetEdgeData(current_edge_id).weight;
|
||||
if ((length_of_current_edge + unpacked_until_weight) >= T_threshold)
|
||||
{
|
||||
unpack_stack.emplace(packed_s_v_path[i - 1], packed_s_v_path[i]);
|
||||
}
|
||||
else
|
||||
{
|
||||
unpacked_until_weight += length_of_current_edge;
|
||||
s_P = packed_s_v_path[i - 1];
|
||||
}
|
||||
}
|
||||
|
||||
while (!unpack_stack.empty())
|
||||
{
|
||||
const SearchSpaceEdge via_path_edge = unpack_stack.top();
|
||||
unpack_stack.pop();
|
||||
EdgeID edge_in_via_path_id =
|
||||
facade.FindEdgeInEitherDirection(via_path_edge.first, via_path_edge.second);
|
||||
|
||||
if (SPECIAL_EDGEID == edge_in_via_path_id)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
const EdgeData ¤t_edge_data = facade.GetEdgeData(edge_in_via_path_id);
|
||||
const bool current_edge_is_shortcut = current_edge_data.shortcut;
|
||||
if (current_edge_is_shortcut)
|
||||
{
|
||||
const NodeID via_path_middle_node_id = current_edge_data.id;
|
||||
const EdgeID second_segment_edge_id =
|
||||
facade.FindEdgeInEitherDirection(via_path_middle_node_id, via_path_edge.second);
|
||||
const int second_segment_length = facade.GetEdgeData(second_segment_edge_id).weight;
|
||||
// attention: !unpacking in reverse!
|
||||
// Check if second segment is the one to go over treshold? if yes add second segment
|
||||
// to stack, else push first segment to stack and add weight of second one.
|
||||
if (unpacked_until_weight + second_segment_length >= T_threshold)
|
||||
{
|
||||
unpack_stack.emplace(via_path_middle_node_id, via_path_edge.second);
|
||||
}
|
||||
else
|
||||
{
|
||||
unpacked_until_weight += second_segment_length;
|
||||
unpack_stack.emplace(via_path_edge.first, via_path_middle_node_id);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// edge is not a shortcut, set the start node for T-Test to end of edge.
|
||||
unpacked_until_weight += current_edge_data.weight;
|
||||
s_P = via_path_edge.first;
|
||||
}
|
||||
}
|
||||
|
||||
int t_test_path_length = unpacked_until_weight;
|
||||
unpacked_until_weight = 0;
|
||||
// Traverse path s-->v
|
||||
BOOST_ASSERT(!packed_v_t_path.empty());
|
||||
for (unsigned i = 0, packed_path_length = static_cast<unsigned>(packed_v_t_path.size() - 1);
|
||||
(i < packed_path_length) && unpack_stack.empty();
|
||||
++i)
|
||||
{
|
||||
const EdgeID edgeID =
|
||||
facade.FindEdgeInEitherDirection(packed_v_t_path[i], packed_v_t_path[i + 1]);
|
||||
int length_of_current_edge = facade.GetEdgeData(edgeID).weight;
|
||||
if (length_of_current_edge + unpacked_until_weight >= T_threshold)
|
||||
{
|
||||
unpack_stack.emplace(packed_v_t_path[i], packed_v_t_path[i + 1]);
|
||||
}
|
||||
else
|
||||
{
|
||||
unpacked_until_weight += length_of_current_edge;
|
||||
t_P = packed_v_t_path[i + 1];
|
||||
}
|
||||
}
|
||||
|
||||
while (!unpack_stack.empty())
|
||||
{
|
||||
const SearchSpaceEdge via_path_edge = unpack_stack.top();
|
||||
unpack_stack.pop();
|
||||
EdgeID edge_in_via_path_id =
|
||||
facade.FindEdgeInEitherDirection(via_path_edge.first, via_path_edge.second);
|
||||
if (SPECIAL_EDGEID == edge_in_via_path_id)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
const EdgeData ¤t_edge_data = facade.GetEdgeData(edge_in_via_path_id);
|
||||
const bool IsViaEdgeShortCut = current_edge_data.shortcut;
|
||||
if (IsViaEdgeShortCut)
|
||||
{
|
||||
const NodeID middleOfViaPath = current_edge_data.id;
|
||||
EdgeID edgeIDOfFirstSegment =
|
||||
facade.FindEdgeInEitherDirection(via_path_edge.first, middleOfViaPath);
|
||||
int lengthOfFirstSegment = facade.GetEdgeData(edgeIDOfFirstSegment).weight;
|
||||
// Check if first segment is the one to go over treshold? if yes first segment to
|
||||
// stack, else push second segment to stack and add weight of first one.
|
||||
if (unpacked_until_weight + lengthOfFirstSegment >= T_threshold)
|
||||
{
|
||||
unpack_stack.emplace(via_path_edge.first, middleOfViaPath);
|
||||
}
|
||||
else
|
||||
{
|
||||
unpacked_until_weight += lengthOfFirstSegment;
|
||||
unpack_stack.emplace(middleOfViaPath, via_path_edge.second);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// edge is not a shortcut, set the start node for T-Test to end of edge.
|
||||
unpacked_until_weight += current_edge_data.weight;
|
||||
t_P = via_path_edge.second;
|
||||
}
|
||||
}
|
||||
|
||||
t_test_path_length += unpacked_until_weight;
|
||||
// Run actual T-Test query and compare if weight equal.
|
||||
engine_working_data.InitializeOrClearThirdThreadLocalStorage(facade.GetNumberOfNodes());
|
||||
|
||||
QueryHeap &forward_heap3 = *engine_working_data.forward_heap_3;
|
||||
QueryHeap &reverse_heap3 = *engine_working_data.reverse_heap_3;
|
||||
int upper_bound = INVALID_EDGE_WEIGHT;
|
||||
NodeID middle = SPECIAL_NODEID;
|
||||
|
||||
forward_heap3.Insert(s_P, 0, s_P);
|
||||
reverse_heap3.Insert(t_P, 0, t_P);
|
||||
// exploration from s and t until deletemin/(1+epsilon) > _lengt_oO_sShortest_path
|
||||
while ((forward_heap3.Size() + reverse_heap3.Size()) > 0)
|
||||
{
|
||||
if (!forward_heap3.Empty())
|
||||
{
|
||||
super::RoutingStep(facade,
|
||||
forward_heap3,
|
||||
reverse_heap3,
|
||||
middle,
|
||||
upper_bound,
|
||||
min_edge_offset,
|
||||
true,
|
||||
STALLING_ENABLED,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS);
|
||||
}
|
||||
if (!reverse_heap3.Empty())
|
||||
{
|
||||
super::RoutingStep(facade,
|
||||
reverse_heap3,
|
||||
forward_heap3,
|
||||
middle,
|
||||
upper_bound,
|
||||
min_edge_offset,
|
||||
false,
|
||||
STALLING_ENABLED,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS);
|
||||
}
|
||||
}
|
||||
return (upper_bound <= t_test_path_length);
|
||||
}
|
||||
const EdgeWeight min_edge_offset) const;
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace routing_algorithms
|
||||
} // namespace engine
|
||||
} // namespace osrm
|
||||
|
||||
#endif /* ALTERNATIVE_PATH_ROUTING_HPP */
|
||||
|
@ -3,7 +3,9 @@
|
||||
|
||||
#include <boost/assert.hpp>
|
||||
#include <iterator>
|
||||
#include <memory>
|
||||
|
||||
#include "engine/datafacade/datafacade_base.hpp"
|
||||
#include "engine/routing_algorithms/routing_base.hpp"
|
||||
#include "engine/search_engine_data.hpp"
|
||||
#include "util/integer_range.hpp"
|
||||
@ -23,10 +25,9 @@ namespace routing_algorithms
|
||||
/// by the previous route.
|
||||
/// This variation is only an optimazation for graphs with slow queries, for example
|
||||
/// not fully contracted graphs.
|
||||
template <class DataFacadeT>
|
||||
class DirectShortestPathRouting final : public BasicRoutingInterface<DataFacadeT>
|
||||
class DirectShortestPathRouting final : public BasicRoutingInterface
|
||||
{
|
||||
using super = BasicRoutingInterface<DataFacadeT>;
|
||||
using super = BasicRoutingInterface;
|
||||
using QueryHeap = SearchEngineData::QueryHeap;
|
||||
SearchEngineData &engine_working_data;
|
||||
|
||||
@ -38,116 +39,13 @@ class DirectShortestPathRouting final : public BasicRoutingInterface<DataFacadeT
|
||||
|
||||
~DirectShortestPathRouting() {}
|
||||
|
||||
void operator()(const DataFacadeT &facade,
|
||||
void operator()(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
const std::vector<PhantomNodes> &phantom_nodes_vector,
|
||||
InternalRouteResult &raw_route_data) const
|
||||
{
|
||||
// Get weight to next pair of target nodes.
|
||||
BOOST_ASSERT_MSG(1 == phantom_nodes_vector.size(),
|
||||
"Direct Shortest Path Query only accepts a single source and target pair. "
|
||||
"Multiple ones have been specified.");
|
||||
const auto &phantom_node_pair = phantom_nodes_vector.front();
|
||||
const auto &source_phantom = phantom_node_pair.source_phantom;
|
||||
const auto &target_phantom = phantom_node_pair.target_phantom;
|
||||
|
||||
engine_working_data.InitializeOrClearFirstThreadLocalStorage(facade.GetNumberOfNodes());
|
||||
QueryHeap &forward_heap = *(engine_working_data.forward_heap_1);
|
||||
QueryHeap &reverse_heap = *(engine_working_data.reverse_heap_1);
|
||||
forward_heap.Clear();
|
||||
reverse_heap.Clear();
|
||||
|
||||
BOOST_ASSERT(source_phantom.IsValid());
|
||||
BOOST_ASSERT(target_phantom.IsValid());
|
||||
|
||||
if (source_phantom.forward_segment_id.enabled)
|
||||
{
|
||||
forward_heap.Insert(source_phantom.forward_segment_id.id,
|
||||
-source_phantom.GetForwardWeightPlusOffset(),
|
||||
source_phantom.forward_segment_id.id);
|
||||
}
|
||||
if (source_phantom.reverse_segment_id.enabled)
|
||||
{
|
||||
forward_heap.Insert(source_phantom.reverse_segment_id.id,
|
||||
-source_phantom.GetReverseWeightPlusOffset(),
|
||||
source_phantom.reverse_segment_id.id);
|
||||
}
|
||||
|
||||
if (target_phantom.forward_segment_id.enabled)
|
||||
{
|
||||
reverse_heap.Insert(target_phantom.forward_segment_id.id,
|
||||
target_phantom.GetForwardWeightPlusOffset(),
|
||||
target_phantom.forward_segment_id.id);
|
||||
}
|
||||
|
||||
if (target_phantom.reverse_segment_id.enabled)
|
||||
{
|
||||
reverse_heap.Insert(target_phantom.reverse_segment_id.id,
|
||||
target_phantom.GetReverseWeightPlusOffset(),
|
||||
target_phantom.reverse_segment_id.id);
|
||||
}
|
||||
|
||||
int weight = INVALID_EDGE_WEIGHT;
|
||||
std::vector<NodeID> packed_leg;
|
||||
|
||||
const bool constexpr DO_NOT_FORCE_LOOPS =
|
||||
false; // prevents forcing of loops, since offsets are set correctly
|
||||
|
||||
if (facade.GetCoreSize() > 0)
|
||||
{
|
||||
engine_working_data.InitializeOrClearSecondThreadLocalStorage(
|
||||
facade.GetNumberOfNodes());
|
||||
QueryHeap &forward_core_heap = *(engine_working_data.forward_heap_2);
|
||||
QueryHeap &reverse_core_heap = *(engine_working_data.reverse_heap_2);
|
||||
forward_core_heap.Clear();
|
||||
reverse_core_heap.Clear();
|
||||
|
||||
super::SearchWithCore(facade,
|
||||
forward_heap,
|
||||
reverse_heap,
|
||||
forward_core_heap,
|
||||
reverse_core_heap,
|
||||
weight,
|
||||
packed_leg,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS);
|
||||
}
|
||||
else
|
||||
{
|
||||
super::Search(facade,
|
||||
forward_heap,
|
||||
reverse_heap,
|
||||
weight,
|
||||
packed_leg,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS);
|
||||
}
|
||||
|
||||
// No path found for both target nodes?
|
||||
if (INVALID_EDGE_WEIGHT == weight)
|
||||
{
|
||||
raw_route_data.shortest_path_length = INVALID_EDGE_WEIGHT;
|
||||
raw_route_data.alternative_path_length = INVALID_EDGE_WEIGHT;
|
||||
return;
|
||||
}
|
||||
|
||||
BOOST_ASSERT_MSG(!packed_leg.empty(), "packed path empty");
|
||||
|
||||
raw_route_data.shortest_path_length = weight;
|
||||
raw_route_data.unpacked_path_segments.resize(1);
|
||||
raw_route_data.source_traversed_in_reverse.push_back(
|
||||
(packed_leg.front() != phantom_node_pair.source_phantom.forward_segment_id.id));
|
||||
raw_route_data.target_traversed_in_reverse.push_back(
|
||||
(packed_leg.back() != phantom_node_pair.target_phantom.forward_segment_id.id));
|
||||
|
||||
super::UnpackPath(facade,
|
||||
packed_leg.begin(),
|
||||
packed_leg.end(),
|
||||
phantom_node_pair,
|
||||
raw_route_data.unpacked_path_segments.front());
|
||||
}
|
||||
InternalRouteResult &raw_route_data) const;
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace routing_algorithms
|
||||
} // namespace engine
|
||||
} // namespace osrm
|
||||
|
||||
#endif /* DIRECT_SHORTEST_PATH_HPP */
|
||||
|
@ -1,6 +1,7 @@
|
||||
#ifndef MANY_TO_MANY_ROUTING_HPP
|
||||
#define MANY_TO_MANY_ROUTING_HPP
|
||||
|
||||
#include "engine/datafacade/datafacade_base.hpp"
|
||||
#include "engine/routing_algorithms/routing_base.hpp"
|
||||
#include "engine/search_engine_data.hpp"
|
||||
#include "util/typedefs.hpp"
|
||||
@ -19,10 +20,9 @@ namespace engine
|
||||
namespace routing_algorithms
|
||||
{
|
||||
|
||||
template <class DataFacadeT>
|
||||
class ManyToManyRouting final : public BasicRoutingInterface<DataFacadeT>
|
||||
class ManyToManyRouting final : public BasicRoutingInterface
|
||||
{
|
||||
using super = BasicRoutingInterface<DataFacadeT>;
|
||||
using super = BasicRoutingInterface;
|
||||
using QueryHeap = SearchEngineData::QueryHeap;
|
||||
SearchEngineData &engine_working_data;
|
||||
|
||||
@ -45,196 +45,37 @@ class ManyToManyRouting final : public BasicRoutingInterface<DataFacadeT>
|
||||
{
|
||||
}
|
||||
|
||||
std::vector<EdgeWeight> operator()(const DataFacadeT &facade,
|
||||
const std::vector<PhantomNode> &phantom_nodes,
|
||||
const std::vector<std::size_t> &source_indices,
|
||||
const std::vector<std::size_t> &target_indices) const
|
||||
{
|
||||
const auto number_of_sources =
|
||||
source_indices.empty() ? phantom_nodes.size() : source_indices.size();
|
||||
const auto number_of_targets =
|
||||
target_indices.empty() ? phantom_nodes.size() : target_indices.size();
|
||||
const auto number_of_entries = number_of_sources * number_of_targets;
|
||||
std::vector<EdgeWeight> result_table(number_of_entries,
|
||||
std::numeric_limits<EdgeWeight>::max());
|
||||
std::vector<EdgeWeight>
|
||||
operator()(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
const std::vector<PhantomNode> &phantom_nodes,
|
||||
const std::vector<std::size_t> &source_indices,
|
||||
const std::vector<std::size_t> &target_indices) const;
|
||||
|
||||
engine_working_data.InitializeOrClearFirstThreadLocalStorage(facade.GetNumberOfNodes());
|
||||
|
||||
QueryHeap &query_heap = *(engine_working_data.forward_heap_1);
|
||||
|
||||
SearchSpaceWithBuckets search_space_with_buckets;
|
||||
|
||||
unsigned column_idx = 0;
|
||||
const auto search_target_phantom = [&](const PhantomNode &phantom) {
|
||||
query_heap.Clear();
|
||||
// insert target(s) at weight 0
|
||||
|
||||
if (phantom.forward_segment_id.enabled)
|
||||
{
|
||||
query_heap.Insert(phantom.forward_segment_id.id,
|
||||
phantom.GetForwardWeightPlusOffset(),
|
||||
phantom.forward_segment_id.id);
|
||||
}
|
||||
if (phantom.reverse_segment_id.enabled)
|
||||
{
|
||||
query_heap.Insert(phantom.reverse_segment_id.id,
|
||||
phantom.GetReverseWeightPlusOffset(),
|
||||
phantom.reverse_segment_id.id);
|
||||
}
|
||||
|
||||
// explore search space
|
||||
while (!query_heap.Empty())
|
||||
{
|
||||
BackwardRoutingStep(facade, column_idx, query_heap, search_space_with_buckets);
|
||||
}
|
||||
++column_idx;
|
||||
};
|
||||
|
||||
// for each source do forward search
|
||||
unsigned row_idx = 0;
|
||||
const auto search_source_phantom = [&](const PhantomNode &phantom) {
|
||||
query_heap.Clear();
|
||||
// insert target(s) at weight 0
|
||||
|
||||
if (phantom.forward_segment_id.enabled)
|
||||
{
|
||||
query_heap.Insert(phantom.forward_segment_id.id,
|
||||
-phantom.GetForwardWeightPlusOffset(),
|
||||
phantom.forward_segment_id.id);
|
||||
}
|
||||
if (phantom.reverse_segment_id.enabled)
|
||||
{
|
||||
query_heap.Insert(phantom.reverse_segment_id.id,
|
||||
-phantom.GetReverseWeightPlusOffset(),
|
||||
phantom.reverse_segment_id.id);
|
||||
}
|
||||
|
||||
// explore search space
|
||||
while (!query_heap.Empty())
|
||||
{
|
||||
ForwardRoutingStep(facade,
|
||||
row_idx,
|
||||
number_of_targets,
|
||||
query_heap,
|
||||
search_space_with_buckets,
|
||||
result_table);
|
||||
}
|
||||
++row_idx;
|
||||
};
|
||||
|
||||
if (target_indices.empty())
|
||||
{
|
||||
for (const auto &phantom : phantom_nodes)
|
||||
{
|
||||
search_target_phantom(phantom);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
for (const auto index : target_indices)
|
||||
{
|
||||
const auto &phantom = phantom_nodes[index];
|
||||
search_target_phantom(phantom);
|
||||
}
|
||||
}
|
||||
|
||||
if (source_indices.empty())
|
||||
{
|
||||
for (const auto &phantom : phantom_nodes)
|
||||
{
|
||||
search_source_phantom(phantom);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
for (const auto index : source_indices)
|
||||
{
|
||||
const auto &phantom = phantom_nodes[index];
|
||||
search_source_phantom(phantom);
|
||||
}
|
||||
}
|
||||
|
||||
return result_table;
|
||||
}
|
||||
|
||||
void ForwardRoutingStep(const DataFacadeT &facade,
|
||||
void ForwardRoutingStep(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
const unsigned row_idx,
|
||||
const unsigned number_of_targets,
|
||||
QueryHeap &query_heap,
|
||||
const SearchSpaceWithBuckets &search_space_with_buckets,
|
||||
std::vector<EdgeWeight> &result_table) const
|
||||
{
|
||||
const NodeID node = query_heap.DeleteMin();
|
||||
const int source_weight = query_heap.GetKey(node);
|
||||
std::vector<EdgeWeight> &result_table) const;
|
||||
|
||||
// check if each encountered node has an entry
|
||||
const auto bucket_iterator = search_space_with_buckets.find(node);
|
||||
// iterate bucket if there exists one
|
||||
if (bucket_iterator != search_space_with_buckets.end())
|
||||
{
|
||||
const std::vector<NodeBucket> &bucket_list = bucket_iterator->second;
|
||||
for (const NodeBucket ¤t_bucket : bucket_list)
|
||||
{
|
||||
// get target id from bucket entry
|
||||
const unsigned column_idx = current_bucket.target_id;
|
||||
const int target_weight = current_bucket.weight;
|
||||
auto ¤t_weight = result_table[row_idx * number_of_targets + column_idx];
|
||||
// check if new weight is better
|
||||
const EdgeWeight new_weight = source_weight + target_weight;
|
||||
if (new_weight < 0)
|
||||
{
|
||||
const EdgeWeight loop_weight = super::GetLoopWeight(facade, node);
|
||||
const int new_weight_with_loop = new_weight + loop_weight;
|
||||
if (loop_weight != INVALID_EDGE_WEIGHT && new_weight_with_loop >= 0)
|
||||
{
|
||||
current_weight = std::min(current_weight, new_weight_with_loop);
|
||||
}
|
||||
}
|
||||
else if (new_weight < current_weight)
|
||||
{
|
||||
result_table[row_idx * number_of_targets + column_idx] = new_weight;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (StallAtNode<true>(facade, node, source_weight, query_heap))
|
||||
{
|
||||
return;
|
||||
}
|
||||
RelaxOutgoingEdges<true>(facade, node, source_weight, query_heap);
|
||||
}
|
||||
|
||||
void BackwardRoutingStep(const DataFacadeT &facade,
|
||||
void BackwardRoutingStep(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
const unsigned column_idx,
|
||||
QueryHeap &query_heap,
|
||||
SearchSpaceWithBuckets &search_space_with_buckets) const
|
||||
{
|
||||
const NodeID node = query_heap.DeleteMin();
|
||||
const int target_weight = query_heap.GetKey(node);
|
||||
|
||||
// store settled nodes in search space bucket
|
||||
search_space_with_buckets[node].emplace_back(column_idx, target_weight);
|
||||
|
||||
if (StallAtNode<false>(facade, node, target_weight, query_heap))
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
RelaxOutgoingEdges<false>(facade, node, target_weight, query_heap);
|
||||
}
|
||||
SearchSpaceWithBuckets &search_space_with_buckets) const;
|
||||
|
||||
template <bool forward_direction>
|
||||
inline void RelaxOutgoingEdges(const DataFacadeT &facade,
|
||||
inline void RelaxOutgoingEdges(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
const NodeID node,
|
||||
const EdgeWeight weight,
|
||||
QueryHeap &query_heap) const
|
||||
{
|
||||
for (auto edge : facade.GetAdjacentEdgeRange(node))
|
||||
for (auto edge : facade->GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const auto &data = facade.GetEdgeData(edge);
|
||||
const auto &data = facade->GetEdgeData(edge);
|
||||
const bool direction_flag = (forward_direction ? data.forward : data.backward);
|
||||
if (direction_flag)
|
||||
{
|
||||
const NodeID to = facade.GetTarget(edge);
|
||||
const NodeID to = facade->GetTarget(edge);
|
||||
const int edge_weight = data.weight;
|
||||
|
||||
BOOST_ASSERT_MSG(edge_weight > 0, "edge_weight invalid");
|
||||
@ -258,18 +99,18 @@ class ManyToManyRouting final : public BasicRoutingInterface<DataFacadeT>
|
||||
|
||||
// Stalling
|
||||
template <bool forward_direction>
|
||||
inline bool StallAtNode(const DataFacadeT &facade,
|
||||
inline bool StallAtNode(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
const NodeID node,
|
||||
const EdgeWeight weight,
|
||||
QueryHeap &query_heap) const
|
||||
{
|
||||
for (auto edge : facade.GetAdjacentEdgeRange(node))
|
||||
for (auto edge : facade->GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const auto &data = facade.GetEdgeData(edge);
|
||||
const auto &data = facade->GetEdgeData(edge);
|
||||
const bool reverse_flag = ((!forward_direction) ? data.forward : data.backward);
|
||||
if (reverse_flag)
|
||||
{
|
||||
const NodeID to = facade.GetTarget(edge);
|
||||
const NodeID to = facade->GetTarget(edge);
|
||||
const int edge_weight = data.weight;
|
||||
BOOST_ASSERT_MSG(edge_weight > 0, "edge_weight invalid");
|
||||
if (query_heap.WasInserted(to))
|
||||
@ -284,8 +125,9 @@ class ManyToManyRouting final : public BasicRoutingInterface<DataFacadeT>
|
||||
return false;
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace routing_algorithms
|
||||
} // namespace engine
|
||||
} // namespace osrm
|
||||
|
||||
#endif
|
||||
|
@ -1,6 +1,7 @@
|
||||
#ifndef MAP_MATCHING_HPP
|
||||
#define MAP_MATCHING_HPP
|
||||
|
||||
#include "engine/datafacade/datafacade_base.hpp"
|
||||
#include "engine/routing_algorithms/routing_base.hpp"
|
||||
|
||||
#include "engine/map_matching/hidden_markov_model.hpp"
|
||||
@ -16,6 +17,7 @@
|
||||
#include <algorithm>
|
||||
#include <deque>
|
||||
#include <iomanip>
|
||||
#include <memory>
|
||||
#include <numeric>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
@ -37,9 +39,9 @@ static const constexpr double MATCHING_BETA = 10;
|
||||
constexpr static const double MAX_DISTANCE_DELTA = 2000.;
|
||||
|
||||
// implements a hidden markov model map matching algorithm
|
||||
template <class DataFacadeT> class MapMatching final : public BasicRoutingInterface<DataFacadeT>
|
||||
class MapMatching final : public BasicRoutingInterface
|
||||
{
|
||||
using super = BasicRoutingInterface<DataFacadeT>;
|
||||
using super = BasicRoutingInterface;
|
||||
using QueryHeap = SearchEngineData::QueryHeap;
|
||||
SearchEngineData &engine_working_data;
|
||||
map_matching::EmissionLogProbability default_emission_log_probability;
|
||||
@ -47,20 +49,7 @@ template <class DataFacadeT> class MapMatching final : public BasicRoutingInterf
|
||||
map_matching::MatchingConfidence confidence;
|
||||
extractor::ProfileProperties m_profile_properties;
|
||||
|
||||
unsigned GetMedianSampleTime(const std::vector<unsigned> ×tamps) const
|
||||
{
|
||||
BOOST_ASSERT(timestamps.size() > 1);
|
||||
|
||||
std::vector<unsigned> sample_times(timestamps.size());
|
||||
|
||||
std::adjacent_difference(timestamps.begin(), timestamps.end(), sample_times.begin());
|
||||
|
||||
// don't use first element of sample_times -> will not be a difference.
|
||||
auto first_elem = std::next(sample_times.begin());
|
||||
auto median = first_elem + std::distance(first_elem, sample_times.end()) / 2;
|
||||
std::nth_element(first_elem, median, sample_times.end());
|
||||
return *median;
|
||||
}
|
||||
unsigned GetMedianSampleTime(const std::vector<unsigned> ×tamps) const;
|
||||
|
||||
public:
|
||||
MapMatching(SearchEngineData &engine_working_data, const double default_gps_precision)
|
||||
@ -71,357 +60,11 @@ template <class DataFacadeT> class MapMatching final : public BasicRoutingInterf
|
||||
}
|
||||
|
||||
SubMatchingList
|
||||
operator()(const DataFacadeT &facade,
|
||||
operator()(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
const CandidateLists &candidates_list,
|
||||
const std::vector<util::Coordinate> &trace_coordinates,
|
||||
const std::vector<unsigned> &trace_timestamps,
|
||||
const std::vector<boost::optional<double>> &trace_gps_precision) const
|
||||
{
|
||||
SubMatchingList sub_matchings;
|
||||
|
||||
BOOST_ASSERT(candidates_list.size() == trace_coordinates.size());
|
||||
BOOST_ASSERT(candidates_list.size() > 1);
|
||||
|
||||
const bool use_timestamps = trace_timestamps.size() > 1;
|
||||
|
||||
const auto median_sample_time = [&] {
|
||||
if (use_timestamps)
|
||||
{
|
||||
return std::max(1u, GetMedianSampleTime(trace_timestamps));
|
||||
}
|
||||
else
|
||||
{
|
||||
return 1u;
|
||||
}
|
||||
}();
|
||||
const auto max_broken_time = median_sample_time * MAX_BROKEN_STATES;
|
||||
const auto max_distance_delta = [&] {
|
||||
if (use_timestamps)
|
||||
{
|
||||
return median_sample_time * facade.GetMapMatchingMaxSpeed();
|
||||
}
|
||||
else
|
||||
{
|
||||
return MAX_DISTANCE_DELTA;
|
||||
}
|
||||
}();
|
||||
|
||||
std::vector<std::vector<double>> emission_log_probabilities(trace_coordinates.size());
|
||||
if (trace_gps_precision.empty())
|
||||
{
|
||||
for (auto t = 0UL; t < candidates_list.size(); ++t)
|
||||
{
|
||||
emission_log_probabilities[t].resize(candidates_list[t].size());
|
||||
std::transform(candidates_list[t].begin(),
|
||||
candidates_list[t].end(),
|
||||
emission_log_probabilities[t].begin(),
|
||||
[this](const PhantomNodeWithDistance &candidate) {
|
||||
return default_emission_log_probability(candidate.distance);
|
||||
});
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
for (auto t = 0UL; t < candidates_list.size(); ++t)
|
||||
{
|
||||
emission_log_probabilities[t].resize(candidates_list[t].size());
|
||||
if (trace_gps_precision[t])
|
||||
{
|
||||
map_matching::EmissionLogProbability emission_log_probability(
|
||||
*trace_gps_precision[t]);
|
||||
std::transform(
|
||||
candidates_list[t].begin(),
|
||||
candidates_list[t].end(),
|
||||
emission_log_probabilities[t].begin(),
|
||||
[&emission_log_probability](const PhantomNodeWithDistance &candidate) {
|
||||
return emission_log_probability(candidate.distance);
|
||||
});
|
||||
}
|
||||
else
|
||||
{
|
||||
std::transform(candidates_list[t].begin(),
|
||||
candidates_list[t].end(),
|
||||
emission_log_probabilities[t].begin(),
|
||||
[this](const PhantomNodeWithDistance &candidate) {
|
||||
return default_emission_log_probability(candidate.distance);
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
HMM model(candidates_list, emission_log_probabilities);
|
||||
|
||||
std::size_t initial_timestamp = model.initialize(0);
|
||||
if (initial_timestamp == map_matching::INVALID_STATE)
|
||||
{
|
||||
return sub_matchings;
|
||||
}
|
||||
|
||||
engine_working_data.InitializeOrClearFirstThreadLocalStorage(facade.GetNumberOfNodes());
|
||||
engine_working_data.InitializeOrClearSecondThreadLocalStorage(facade.GetNumberOfNodes());
|
||||
|
||||
QueryHeap &forward_heap = *(engine_working_data.forward_heap_1);
|
||||
QueryHeap &reverse_heap = *(engine_working_data.reverse_heap_1);
|
||||
QueryHeap &forward_core_heap = *(engine_working_data.forward_heap_2);
|
||||
QueryHeap &reverse_core_heap = *(engine_working_data.reverse_heap_2);
|
||||
|
||||
std::size_t breakage_begin = map_matching::INVALID_STATE;
|
||||
std::vector<std::size_t> split_points;
|
||||
std::vector<std::size_t> prev_unbroken_timestamps;
|
||||
prev_unbroken_timestamps.reserve(candidates_list.size());
|
||||
prev_unbroken_timestamps.push_back(initial_timestamp);
|
||||
for (auto t = initial_timestamp + 1; t < candidates_list.size(); ++t)
|
||||
{
|
||||
|
||||
const bool gap_in_trace = [&, use_timestamps]() {
|
||||
// use temporal information if available to determine a split
|
||||
if (use_timestamps)
|
||||
{
|
||||
return trace_timestamps[t] - trace_timestamps[prev_unbroken_timestamps.back()] >
|
||||
max_broken_time;
|
||||
}
|
||||
else
|
||||
{
|
||||
return t - prev_unbroken_timestamps.back() > MAX_BROKEN_STATES;
|
||||
}
|
||||
}();
|
||||
|
||||
if (!gap_in_trace)
|
||||
{
|
||||
BOOST_ASSERT(!prev_unbroken_timestamps.empty());
|
||||
const std::size_t prev_unbroken_timestamp = prev_unbroken_timestamps.back();
|
||||
|
||||
const auto &prev_viterbi = model.viterbi[prev_unbroken_timestamp];
|
||||
const auto &prev_pruned = model.pruned[prev_unbroken_timestamp];
|
||||
const auto &prev_unbroken_timestamps_list =
|
||||
candidates_list[prev_unbroken_timestamp];
|
||||
const auto &prev_coordinate = trace_coordinates[prev_unbroken_timestamp];
|
||||
|
||||
auto ¤t_viterbi = model.viterbi[t];
|
||||
auto ¤t_pruned = model.pruned[t];
|
||||
auto ¤t_parents = model.parents[t];
|
||||
auto ¤t_lengths = model.path_distances[t];
|
||||
const auto ¤t_timestamps_list = candidates_list[t];
|
||||
const auto ¤t_coordinate = trace_coordinates[t];
|
||||
|
||||
const auto haversine_distance = util::coordinate_calculation::haversineDistance(
|
||||
prev_coordinate, current_coordinate);
|
||||
// assumes minumum of 0.1 m/s
|
||||
const int duration_upper_bound =
|
||||
((haversine_distance + max_distance_delta) * 0.25) * 10;
|
||||
|
||||
// compute d_t for this timestamp and the next one
|
||||
for (const auto s : util::irange<std::size_t>(0UL, prev_viterbi.size()))
|
||||
{
|
||||
if (prev_pruned[s])
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
for (const auto s_prime :
|
||||
util::irange<std::size_t>(0UL, current_viterbi.size()))
|
||||
{
|
||||
const double emission_pr = emission_log_probabilities[t][s_prime];
|
||||
double new_value = prev_viterbi[s] + emission_pr;
|
||||
if (current_viterbi[s_prime] > new_value)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
forward_heap.Clear();
|
||||
reverse_heap.Clear();
|
||||
|
||||
double network_distance;
|
||||
if (facade.GetCoreSize() > 0)
|
||||
{
|
||||
forward_core_heap.Clear();
|
||||
reverse_core_heap.Clear();
|
||||
network_distance = super::GetNetworkDistanceWithCore(
|
||||
facade,
|
||||
forward_heap,
|
||||
reverse_heap,
|
||||
forward_core_heap,
|
||||
reverse_core_heap,
|
||||
prev_unbroken_timestamps_list[s].phantom_node,
|
||||
current_timestamps_list[s_prime].phantom_node,
|
||||
duration_upper_bound);
|
||||
}
|
||||
else
|
||||
{
|
||||
network_distance = super::GetNetworkDistance(
|
||||
facade,
|
||||
forward_heap,
|
||||
reverse_heap,
|
||||
prev_unbroken_timestamps_list[s].phantom_node,
|
||||
current_timestamps_list[s_prime].phantom_node);
|
||||
}
|
||||
|
||||
// get distance diff between loc1/2 and locs/s_prime
|
||||
const auto d_t = std::abs(network_distance - haversine_distance);
|
||||
|
||||
// very low probability transition -> prune
|
||||
if (d_t >= max_distance_delta)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
const double transition_pr = transition_log_probability(d_t);
|
||||
new_value += transition_pr;
|
||||
|
||||
if (new_value > current_viterbi[s_prime])
|
||||
{
|
||||
current_viterbi[s_prime] = new_value;
|
||||
current_parents[s_prime] = std::make_pair(prev_unbroken_timestamp, s);
|
||||
current_lengths[s_prime] = network_distance;
|
||||
current_pruned[s_prime] = false;
|
||||
model.breakage[t] = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (model.breakage[t])
|
||||
{
|
||||
// save start of breakage -> we need this as split point
|
||||
if (t < breakage_begin)
|
||||
{
|
||||
breakage_begin = t;
|
||||
}
|
||||
|
||||
BOOST_ASSERT(prev_unbroken_timestamps.size() > 0);
|
||||
// remove both ends of the breakage
|
||||
prev_unbroken_timestamps.pop_back();
|
||||
}
|
||||
else
|
||||
{
|
||||
prev_unbroken_timestamps.push_back(t);
|
||||
}
|
||||
}
|
||||
|
||||
// breakage recover has removed all previous good points
|
||||
const bool trace_split = prev_unbroken_timestamps.empty();
|
||||
|
||||
if (trace_split || gap_in_trace)
|
||||
{
|
||||
std::size_t split_index = t;
|
||||
if (breakage_begin != map_matching::INVALID_STATE)
|
||||
{
|
||||
split_index = breakage_begin;
|
||||
breakage_begin = map_matching::INVALID_STATE;
|
||||
}
|
||||
split_points.push_back(split_index);
|
||||
|
||||
// note: this preserves everything before split_index
|
||||
model.Clear(split_index);
|
||||
std::size_t new_start = model.initialize(split_index);
|
||||
// no new start was found -> stop viterbi calculation
|
||||
if (new_start == map_matching::INVALID_STATE)
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
prev_unbroken_timestamps.clear();
|
||||
prev_unbroken_timestamps.push_back(new_start);
|
||||
// Important: We potentially go back here!
|
||||
// However since t > new_start >= breakge_begin
|
||||
// we can only reset trace_coordindates.size() times.
|
||||
t = new_start;
|
||||
// note: the head of the loop will call ++t, hence the next
|
||||
// iteration will actually be on new_start+1
|
||||
}
|
||||
}
|
||||
|
||||
if (!prev_unbroken_timestamps.empty())
|
||||
{
|
||||
split_points.push_back(prev_unbroken_timestamps.back() + 1);
|
||||
}
|
||||
|
||||
std::size_t sub_matching_begin = initial_timestamp;
|
||||
for (const auto sub_matching_end : split_points)
|
||||
{
|
||||
map_matching::SubMatching matching;
|
||||
|
||||
std::size_t parent_timestamp_index = sub_matching_end - 1;
|
||||
while (parent_timestamp_index >= sub_matching_begin &&
|
||||
model.breakage[parent_timestamp_index])
|
||||
{
|
||||
--parent_timestamp_index;
|
||||
}
|
||||
while (sub_matching_begin < sub_matching_end && model.breakage[sub_matching_begin])
|
||||
{
|
||||
++sub_matching_begin;
|
||||
}
|
||||
|
||||
// matchings that only consist of one candidate are invalid
|
||||
if (parent_timestamp_index - sub_matching_begin + 1 < 2)
|
||||
{
|
||||
sub_matching_begin = sub_matching_end;
|
||||
continue;
|
||||
}
|
||||
|
||||
// loop through the columns, and only compare the last entry
|
||||
const auto max_element_iter =
|
||||
std::max_element(model.viterbi[parent_timestamp_index].begin(),
|
||||
model.viterbi[parent_timestamp_index].end());
|
||||
|
||||
std::size_t parent_candidate_index =
|
||||
std::distance(model.viterbi[parent_timestamp_index].begin(), max_element_iter);
|
||||
|
||||
std::deque<std::pair<std::size_t, std::size_t>> reconstructed_indices;
|
||||
while (parent_timestamp_index > sub_matching_begin)
|
||||
{
|
||||
if (model.breakage[parent_timestamp_index])
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
reconstructed_indices.emplace_front(parent_timestamp_index, parent_candidate_index);
|
||||
const auto &next = model.parents[parent_timestamp_index][parent_candidate_index];
|
||||
// make sure we can never get stuck in this loop
|
||||
if (parent_timestamp_index == next.first)
|
||||
{
|
||||
break;
|
||||
}
|
||||
parent_timestamp_index = next.first;
|
||||
parent_candidate_index = next.second;
|
||||
}
|
||||
reconstructed_indices.emplace_front(parent_timestamp_index, parent_candidate_index);
|
||||
if (reconstructed_indices.size() < 2)
|
||||
{
|
||||
sub_matching_begin = sub_matching_end;
|
||||
continue;
|
||||
}
|
||||
|
||||
auto matching_distance = 0.0;
|
||||
auto trace_distance = 0.0;
|
||||
matching.nodes.reserve(reconstructed_indices.size());
|
||||
matching.indices.reserve(reconstructed_indices.size());
|
||||
for (const auto &idx : reconstructed_indices)
|
||||
{
|
||||
const auto timestamp_index = idx.first;
|
||||
const auto location_index = idx.second;
|
||||
|
||||
matching.indices.push_back(timestamp_index);
|
||||
matching.nodes.push_back(
|
||||
candidates_list[timestamp_index][location_index].phantom_node);
|
||||
matching_distance += model.path_distances[timestamp_index][location_index];
|
||||
}
|
||||
util::for_each_pair(
|
||||
reconstructed_indices,
|
||||
[&trace_distance,
|
||||
&trace_coordinates](const std::pair<std::size_t, std::size_t> &prev,
|
||||
const std::pair<std::size_t, std::size_t> &curr) {
|
||||
trace_distance += util::coordinate_calculation::haversineDistance(
|
||||
trace_coordinates[prev.first], trace_coordinates[curr.first]);
|
||||
});
|
||||
|
||||
matching.confidence = confidence(trace_distance, matching_distance);
|
||||
|
||||
sub_matchings.push_back(matching);
|
||||
sub_matching_begin = sub_matching_end;
|
||||
}
|
||||
|
||||
return sub_matchings;
|
||||
}
|
||||
const std::vector<boost::optional<double>> &trace_gps_precision) const;
|
||||
};
|
||||
}
|
||||
}
|
||||
|
@ -2,6 +2,7 @@
|
||||
#define ROUTING_BASE_HPP
|
||||
|
||||
#include "extractor/guidance/turn_instruction.hpp"
|
||||
#include "engine/datafacade/datafacade_base.hpp"
|
||||
#include "engine/edge_unpacker.hpp"
|
||||
#include "engine/internal_route_result.hpp"
|
||||
#include "engine/search_engine_data.hpp"
|
||||
@ -16,6 +17,7 @@
|
||||
|
||||
#include <algorithm>
|
||||
#include <iterator>
|
||||
#include <memory>
|
||||
#include <numeric>
|
||||
#include <stack>
|
||||
#include <utility>
|
||||
@ -29,10 +31,10 @@ namespace engine
|
||||
namespace routing_algorithms
|
||||
{
|
||||
|
||||
template <class DataFacadeT> class BasicRoutingInterface
|
||||
class BasicRoutingInterface
|
||||
{
|
||||
private:
|
||||
using EdgeData = typename DataFacadeT::EdgeData;
|
||||
protected:
|
||||
using EdgeData = datafacade::BaseDataFacade::EdgeData;
|
||||
|
||||
public:
|
||||
/*
|
||||
@ -64,7 +66,7 @@ template <class DataFacadeT> class BasicRoutingInterface
|
||||
Since we are dealing with a graph that contains _negative_ edges,
|
||||
we need to add an offset to the termination criterion.
|
||||
*/
|
||||
void RoutingStep(const DataFacadeT &facade,
|
||||
void RoutingStep(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
SearchEngineData::QueryHeap &forward_heap,
|
||||
SearchEngineData::QueryHeap &reverse_heap,
|
||||
NodeID &middle_node_id,
|
||||
@ -73,137 +75,13 @@ template <class DataFacadeT> class BasicRoutingInterface
|
||||
const bool forward_direction,
|
||||
const bool stalling,
|
||||
const bool force_loop_forward,
|
||||
const bool force_loop_reverse) const
|
||||
{
|
||||
const NodeID node = forward_heap.DeleteMin();
|
||||
const std::int32_t weight = forward_heap.GetKey(node);
|
||||
const bool force_loop_reverse) const;
|
||||
|
||||
if (reverse_heap.WasInserted(node))
|
||||
{
|
||||
const std::int32_t new_weight = reverse_heap.GetKey(node) + weight;
|
||||
if (new_weight < upper_bound)
|
||||
{
|
||||
// if loops are forced, they are so at the source
|
||||
if ((force_loop_forward && forward_heap.GetData(node).parent == node) ||
|
||||
(force_loop_reverse && reverse_heap.GetData(node).parent == node) ||
|
||||
// in this case we are looking at a bi-directional way where the source
|
||||
// and target phantom are on the same edge based node
|
||||
new_weight < 0)
|
||||
{
|
||||
// check whether there is a loop present at the node
|
||||
for (const auto edge : facade.GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const EdgeData &data = facade.GetEdgeData(edge);
|
||||
bool forward_directionFlag =
|
||||
(forward_direction ? data.forward : data.backward);
|
||||
if (forward_directionFlag)
|
||||
{
|
||||
const NodeID to = facade.GetTarget(edge);
|
||||
if (to == node)
|
||||
{
|
||||
const EdgeWeight edge_weight = data.weight;
|
||||
const std::int32_t loop_weight = new_weight + edge_weight;
|
||||
if (loop_weight >= 0 && loop_weight < upper_bound)
|
||||
{
|
||||
middle_node_id = node;
|
||||
upper_bound = loop_weight;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
BOOST_ASSERT(new_weight >= 0);
|
||||
|
||||
middle_node_id = node;
|
||||
upper_bound = new_weight;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// make sure we don't terminate too early if we initialize the weight
|
||||
// for the nodes in the forward heap with the forward/reverse offset
|
||||
BOOST_ASSERT(min_edge_offset <= 0);
|
||||
if (weight + min_edge_offset > upper_bound)
|
||||
{
|
||||
forward_heap.DeleteAll();
|
||||
return;
|
||||
}
|
||||
|
||||
// Stalling
|
||||
if (stalling)
|
||||
{
|
||||
for (const auto edge : facade.GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const EdgeData &data = facade.GetEdgeData(edge);
|
||||
const bool reverse_flag = ((!forward_direction) ? data.forward : data.backward);
|
||||
if (reverse_flag)
|
||||
{
|
||||
const NodeID to = facade.GetTarget(edge);
|
||||
const EdgeWeight edge_weight = data.weight;
|
||||
|
||||
BOOST_ASSERT_MSG(edge_weight > 0, "edge_weight invalid");
|
||||
|
||||
if (forward_heap.WasInserted(to))
|
||||
{
|
||||
if (forward_heap.GetKey(to) + edge_weight < weight)
|
||||
{
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (const auto edge : facade.GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const EdgeData &data = facade.GetEdgeData(edge);
|
||||
bool forward_directionFlag = (forward_direction ? data.forward : data.backward);
|
||||
if (forward_directionFlag)
|
||||
{
|
||||
const NodeID to = facade.GetTarget(edge);
|
||||
const EdgeWeight edge_weight = data.weight;
|
||||
|
||||
BOOST_ASSERT_MSG(edge_weight > 0, "edge_weight invalid");
|
||||
const int to_weight = weight + edge_weight;
|
||||
|
||||
// New Node discovered -> Add to Heap + Node Info Storage
|
||||
if (!forward_heap.WasInserted(to))
|
||||
{
|
||||
forward_heap.Insert(to, to_weight, node);
|
||||
}
|
||||
// Found a shorter Path -> Update weight
|
||||
else if (to_weight < forward_heap.GetKey(to))
|
||||
{
|
||||
// new parent
|
||||
forward_heap.GetData(to).parent = node;
|
||||
forward_heap.DecreaseKey(to, to_weight);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
inline EdgeWeight GetLoopWeight(const DataFacadeT &facade, NodeID node) const
|
||||
{
|
||||
EdgeWeight loop_weight = INVALID_EDGE_WEIGHT;
|
||||
for (auto edge : facade.GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const auto &data = facade.GetEdgeData(edge);
|
||||
if (data.forward)
|
||||
{
|
||||
const NodeID to = facade.GetTarget(edge);
|
||||
if (to == node)
|
||||
{
|
||||
loop_weight = std::min(loop_weight, data.weight);
|
||||
}
|
||||
}
|
||||
}
|
||||
return loop_weight;
|
||||
}
|
||||
EdgeWeight GetLoopWeight(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
NodeID node) const;
|
||||
|
||||
template <typename RandomIter>
|
||||
void UnpackPath(const DataFacadeT &facade,
|
||||
void UnpackPath(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
RandomIter packed_path_begin,
|
||||
RandomIter packed_path_end,
|
||||
const PhantomNodes &phantom_node_pair,
|
||||
@ -223,7 +101,7 @@ template <class DataFacadeT> class BasicRoutingInterface
|
||||
*std::prev(packed_path_end) == phantom_node_pair.target_phantom.reverse_segment_id.id);
|
||||
|
||||
UnpackCHPath(
|
||||
facade,
|
||||
*facade,
|
||||
packed_path_begin,
|
||||
packed_path_end,
|
||||
[this,
|
||||
@ -235,28 +113,30 @@ template <class DataFacadeT> class BasicRoutingInterface
|
||||
const EdgeData &edge_data) {
|
||||
|
||||
BOOST_ASSERT_MSG(!edge_data.shortcut, "original edge flagged as shortcut");
|
||||
const auto name_index = facade.GetNameIndexFromEdgeID(edge_data.id);
|
||||
const auto turn_instruction = facade.GetTurnInstructionForEdgeID(edge_data.id);
|
||||
const auto name_index = facade->GetNameIndexFromEdgeID(edge_data.id);
|
||||
const auto turn_instruction = facade->GetTurnInstructionForEdgeID(edge_data.id);
|
||||
const extractor::TravelMode travel_mode =
|
||||
(unpacked_path.empty() && start_traversed_in_reverse)
|
||||
? phantom_node_pair.source_phantom.backward_travel_mode
|
||||
: facade.GetTravelModeForEdgeID(edge_data.id);
|
||||
: facade->GetTravelModeForEdgeID(edge_data.id);
|
||||
|
||||
const auto geometry_index = facade.GetGeometryIndexForEdgeID(edge_data.id);
|
||||
const auto geometry_index = facade->GetGeometryIndexForEdgeID(edge_data.id);
|
||||
std::vector<NodeID> id_vector;
|
||||
std::vector<EdgeWeight> weight_vector;
|
||||
std::vector<DatasourceID> datasource_vector;
|
||||
if (geometry_index.forward)
|
||||
{
|
||||
id_vector = facade.GetUncompressedForwardGeometry(geometry_index.id);
|
||||
weight_vector = facade.GetUncompressedForwardWeights(geometry_index.id);
|
||||
datasource_vector = facade.GetUncompressedForwardDatasources(geometry_index.id);
|
||||
id_vector = facade->GetUncompressedForwardGeometry(geometry_index.id);
|
||||
weight_vector = facade->GetUncompressedForwardWeights(geometry_index.id);
|
||||
datasource_vector =
|
||||
facade->GetUncompressedForwardDatasources(geometry_index.id);
|
||||
}
|
||||
else
|
||||
{
|
||||
id_vector = facade.GetUncompressedReverseGeometry(geometry_index.id);
|
||||
weight_vector = facade.GetUncompressedReverseWeights(geometry_index.id);
|
||||
datasource_vector = facade.GetUncompressedReverseDatasources(geometry_index.id);
|
||||
id_vector = facade->GetUncompressedReverseGeometry(geometry_index.id);
|
||||
weight_vector = facade->GetUncompressedReverseWeights(geometry_index.id);
|
||||
datasource_vector =
|
||||
facade->GetUncompressedReverseDatasources(geometry_index.id);
|
||||
}
|
||||
BOOST_ASSERT(id_vector.size() > 0);
|
||||
BOOST_ASSERT(weight_vector.size() > 0);
|
||||
@ -294,14 +174,14 @@ template <class DataFacadeT> class BasicRoutingInterface
|
||||
util::guidance::TurnBearing(0)});
|
||||
}
|
||||
BOOST_ASSERT(unpacked_path.size() > 0);
|
||||
if (facade.hasLaneData(edge_data.id))
|
||||
unpacked_path.back().lane_data = facade.GetLaneData(edge_data.id);
|
||||
if (facade->hasLaneData(edge_data.id))
|
||||
unpacked_path.back().lane_data = facade->GetLaneData(edge_data.id);
|
||||
|
||||
unpacked_path.back().entry_classid = facade.GetEntryClassID(edge_data.id);
|
||||
unpacked_path.back().entry_classid = facade->GetEntryClassID(edge_data.id);
|
||||
unpacked_path.back().turn_instruction = turn_instruction;
|
||||
unpacked_path.back().duration_until_turn += (edge_data.weight - total_weight);
|
||||
unpacked_path.back().pre_turn_bearing = facade.PreTurnBearing(edge_data.id);
|
||||
unpacked_path.back().post_turn_bearing = facade.PostTurnBearing(edge_data.id);
|
||||
unpacked_path.back().pre_turn_bearing = facade->PreTurnBearing(edge_data.id);
|
||||
unpacked_path.back().post_turn_bearing = facade->PostTurnBearing(edge_data.id);
|
||||
});
|
||||
|
||||
std::size_t start_index = 0, end_index = 0;
|
||||
@ -314,13 +194,13 @@ template <class DataFacadeT> class BasicRoutingInterface
|
||||
|
||||
if (target_traversed_in_reverse)
|
||||
{
|
||||
id_vector = facade.GetUncompressedReverseGeometry(
|
||||
id_vector = facade->GetUncompressedReverseGeometry(
|
||||
phantom_node_pair.target_phantom.packed_geometry_id);
|
||||
|
||||
weight_vector = facade.GetUncompressedReverseWeights(
|
||||
weight_vector = facade->GetUncompressedReverseWeights(
|
||||
phantom_node_pair.target_phantom.packed_geometry_id);
|
||||
|
||||
datasource_vector = facade.GetUncompressedReverseDatasources(
|
||||
datasource_vector = facade->GetUncompressedReverseDatasources(
|
||||
phantom_node_pair.target_phantom.packed_geometry_id);
|
||||
|
||||
if (is_local_path)
|
||||
@ -339,13 +219,13 @@ template <class DataFacadeT> class BasicRoutingInterface
|
||||
}
|
||||
end_index = phantom_node_pair.target_phantom.fwd_segment_position;
|
||||
|
||||
id_vector = facade.GetUncompressedForwardGeometry(
|
||||
id_vector = facade->GetUncompressedForwardGeometry(
|
||||
phantom_node_pair.target_phantom.packed_geometry_id);
|
||||
|
||||
weight_vector = facade.GetUncompressedForwardWeights(
|
||||
weight_vector = facade->GetUncompressedForwardWeights(
|
||||
phantom_node_pair.target_phantom.packed_geometry_id);
|
||||
|
||||
datasource_vector = facade.GetUncompressedForwardDatasources(
|
||||
datasource_vector = facade->GetUncompressedForwardDatasources(
|
||||
phantom_node_pair.target_phantom.packed_geometry_id);
|
||||
}
|
||||
|
||||
@ -423,49 +303,19 @@ template <class DataFacadeT> class BasicRoutingInterface
|
||||
* @param to the node the CH edge finishes at
|
||||
* @param unpacked_path the sequence of original NodeIDs that make up the expanded CH edge
|
||||
*/
|
||||
void UnpackEdge(const DataFacadeT &facade,
|
||||
void UnpackEdge(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
const NodeID from,
|
||||
const NodeID to,
|
||||
std::vector<NodeID> &unpacked_path) const
|
||||
{
|
||||
std::array<NodeID, 2> path{{from, to}};
|
||||
UnpackCHPath(
|
||||
facade,
|
||||
path.begin(),
|
||||
path.end(),
|
||||
[&unpacked_path](const std::pair<NodeID, NodeID> &edge, const EdgeData & /* data */) {
|
||||
unpacked_path.emplace_back(edge.first);
|
||||
});
|
||||
unpacked_path.emplace_back(to);
|
||||
}
|
||||
std::vector<NodeID> &unpacked_path) const;
|
||||
|
||||
void RetrievePackedPathFromHeap(const SearchEngineData::QueryHeap &forward_heap,
|
||||
const SearchEngineData::QueryHeap &reverse_heap,
|
||||
const NodeID middle_node_id,
|
||||
std::vector<NodeID> &packed_path) const
|
||||
{
|
||||
RetrievePackedPathFromSingleHeap(forward_heap, middle_node_id, packed_path);
|
||||
std::reverse(packed_path.begin(), packed_path.end());
|
||||
packed_path.emplace_back(middle_node_id);
|
||||
RetrievePackedPathFromSingleHeap(reverse_heap, middle_node_id, packed_path);
|
||||
}
|
||||
std::vector<NodeID> &packed_path) const;
|
||||
|
||||
void RetrievePackedPathFromSingleHeap(const SearchEngineData::QueryHeap &search_heap,
|
||||
const NodeID middle_node_id,
|
||||
std::vector<NodeID> &packed_path) const
|
||||
{
|
||||
NodeID current_node_id = middle_node_id;
|
||||
// all initial nodes will have itself as parent, or a node not in the heap
|
||||
// in case of a core search heap. We need a distinction between core entry nodes
|
||||
// and start nodes since otherwise start node specific code that assumes
|
||||
// node == node.parent (e.g. the loop code) might get actived.
|
||||
while (current_node_id != search_heap.GetData(current_node_id).parent &&
|
||||
search_heap.WasInserted(search_heap.GetData(current_node_id).parent))
|
||||
{
|
||||
current_node_id = search_heap.GetData(current_node_id).parent;
|
||||
packed_path.emplace_back(current_node_id);
|
||||
}
|
||||
}
|
||||
std::vector<NodeID> &packed_path) const;
|
||||
|
||||
// assumes that heaps are already setup correctly.
|
||||
// ATTENTION: This only works if no additional offset is supplied next to the Phantom Node
|
||||
@ -479,79 +329,14 @@ template <class DataFacadeT> class BasicRoutingInterface
|
||||
// && source_phantom.GetForwardWeightPlusOffset() > target_phantom.GetForwardWeightPlusOffset())
|
||||
// requires
|
||||
// a force loop, if the heaps have been initialized with positive offsets.
|
||||
void Search(const DataFacadeT &facade,
|
||||
void Search(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
SearchEngineData::QueryHeap &forward_heap,
|
||||
SearchEngineData::QueryHeap &reverse_heap,
|
||||
std::int32_t &weight,
|
||||
std::vector<NodeID> &packed_leg,
|
||||
const bool force_loop_forward,
|
||||
const bool force_loop_reverse,
|
||||
const int duration_upper_bound = INVALID_EDGE_WEIGHT) const
|
||||
{
|
||||
NodeID middle = SPECIAL_NODEID;
|
||||
weight = duration_upper_bound;
|
||||
|
||||
// get offset to account for offsets on phantom nodes on compressed edges
|
||||
const auto min_edge_offset = std::min(0, forward_heap.MinKey());
|
||||
BOOST_ASSERT(min_edge_offset <= 0);
|
||||
// we only every insert negative offsets for nodes in the forward heap
|
||||
BOOST_ASSERT(reverse_heap.MinKey() >= 0);
|
||||
|
||||
// run two-Target Dijkstra routing step.
|
||||
const constexpr bool STALLING_ENABLED = true;
|
||||
while (0 < (forward_heap.Size() + reverse_heap.Size()))
|
||||
{
|
||||
if (!forward_heap.Empty())
|
||||
{
|
||||
RoutingStep(facade,
|
||||
forward_heap,
|
||||
reverse_heap,
|
||||
middle,
|
||||
weight,
|
||||
min_edge_offset,
|
||||
true,
|
||||
STALLING_ENABLED,
|
||||
force_loop_forward,
|
||||
force_loop_reverse);
|
||||
}
|
||||
if (!reverse_heap.Empty())
|
||||
{
|
||||
RoutingStep(facade,
|
||||
reverse_heap,
|
||||
forward_heap,
|
||||
middle,
|
||||
weight,
|
||||
min_edge_offset,
|
||||
false,
|
||||
STALLING_ENABLED,
|
||||
force_loop_reverse,
|
||||
force_loop_forward);
|
||||
}
|
||||
}
|
||||
|
||||
// No path found for both target nodes?
|
||||
if (duration_upper_bound <= weight || SPECIAL_NODEID == middle)
|
||||
{
|
||||
weight = INVALID_EDGE_WEIGHT;
|
||||
return;
|
||||
}
|
||||
|
||||
// Was a paths over one of the forward/reverse nodes not found?
|
||||
BOOST_ASSERT_MSG((SPECIAL_NODEID != middle && INVALID_EDGE_WEIGHT != weight),
|
||||
"no path found");
|
||||
|
||||
// make sure to correctly unpack loops
|
||||
if (weight != forward_heap.GetKey(middle) + reverse_heap.GetKey(middle))
|
||||
{
|
||||
// self loop makes up the full path
|
||||
packed_leg.push_back(middle);
|
||||
packed_leg.push_back(middle);
|
||||
}
|
||||
else
|
||||
{
|
||||
RetrievePackedPathFromHeap(forward_heap, reverse_heap, middle, packed_leg);
|
||||
}
|
||||
}
|
||||
const int duration_upper_bound = INVALID_EDGE_WEIGHT) const;
|
||||
|
||||
// assumes that heaps are already setup correctly.
|
||||
// A forced loop might be necessary, if source and target are on the same segment.
|
||||
@ -562,7 +347,7 @@ template <class DataFacadeT> class BasicRoutingInterface
|
||||
// && source_phantom.GetForwardWeightPlusOffset() > target_phantom.GetForwardWeightPlusOffset())
|
||||
// requires
|
||||
// a force loop, if the heaps have been initialized with positive offsets.
|
||||
void SearchWithCore(const DataFacadeT &facade,
|
||||
void SearchWithCore(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
SearchEngineData::QueryHeap &forward_heap,
|
||||
SearchEngineData::QueryHeap &reverse_heap,
|
||||
SearchEngineData::QueryHeap &forward_core_heap,
|
||||
@ -571,389 +356,45 @@ template <class DataFacadeT> class BasicRoutingInterface
|
||||
std::vector<NodeID> &packed_leg,
|
||||
const bool force_loop_forward,
|
||||
const bool force_loop_reverse,
|
||||
int duration_upper_bound = INVALID_EDGE_WEIGHT) const
|
||||
{
|
||||
NodeID middle = SPECIAL_NODEID;
|
||||
weight = duration_upper_bound;
|
||||
|
||||
using CoreEntryPoint = std::tuple<NodeID, EdgeWeight, NodeID>;
|
||||
std::vector<CoreEntryPoint> forward_entry_points;
|
||||
std::vector<CoreEntryPoint> reverse_entry_points;
|
||||
|
||||
// get offset to account for offsets on phantom nodes on compressed edges
|
||||
const auto min_edge_offset = std::min(0, forward_heap.MinKey());
|
||||
// we only every insert negative offsets for nodes in the forward heap
|
||||
BOOST_ASSERT(reverse_heap.MinKey() >= 0);
|
||||
|
||||
const constexpr bool STALLING_ENABLED = true;
|
||||
// run two-Target Dijkstra routing step.
|
||||
while (0 < (forward_heap.Size() + reverse_heap.Size()))
|
||||
{
|
||||
if (!forward_heap.Empty())
|
||||
{
|
||||
if (facade.IsCoreNode(forward_heap.Min()))
|
||||
{
|
||||
const NodeID node = forward_heap.DeleteMin();
|
||||
const int key = forward_heap.GetKey(node);
|
||||
forward_entry_points.emplace_back(node, key, forward_heap.GetData(node).parent);
|
||||
}
|
||||
else
|
||||
{
|
||||
RoutingStep(facade,
|
||||
forward_heap,
|
||||
reverse_heap,
|
||||
middle,
|
||||
weight,
|
||||
min_edge_offset,
|
||||
true,
|
||||
STALLING_ENABLED,
|
||||
force_loop_forward,
|
||||
force_loop_reverse);
|
||||
}
|
||||
}
|
||||
if (!reverse_heap.Empty())
|
||||
{
|
||||
if (facade.IsCoreNode(reverse_heap.Min()))
|
||||
{
|
||||
const NodeID node = reverse_heap.DeleteMin();
|
||||
const int key = reverse_heap.GetKey(node);
|
||||
reverse_entry_points.emplace_back(node, key, reverse_heap.GetData(node).parent);
|
||||
}
|
||||
else
|
||||
{
|
||||
RoutingStep(facade,
|
||||
reverse_heap,
|
||||
forward_heap,
|
||||
middle,
|
||||
weight,
|
||||
min_edge_offset,
|
||||
false,
|
||||
STALLING_ENABLED,
|
||||
force_loop_reverse,
|
||||
force_loop_forward);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const auto insertInCoreHeap = [](const CoreEntryPoint &p,
|
||||
SearchEngineData::QueryHeap &core_heap) {
|
||||
NodeID id;
|
||||
EdgeWeight weight;
|
||||
NodeID parent;
|
||||
// TODO this should use std::apply when we get c++17 support
|
||||
std::tie(id, weight, parent) = p;
|
||||
core_heap.Insert(id, weight, parent);
|
||||
};
|
||||
|
||||
forward_core_heap.Clear();
|
||||
for (const auto &p : forward_entry_points)
|
||||
{
|
||||
insertInCoreHeap(p, forward_core_heap);
|
||||
}
|
||||
|
||||
reverse_core_heap.Clear();
|
||||
for (const auto &p : reverse_entry_points)
|
||||
{
|
||||
insertInCoreHeap(p, reverse_core_heap);
|
||||
}
|
||||
|
||||
// get offset to account for offsets on phantom nodes on compressed edges
|
||||
int min_core_edge_offset = 0;
|
||||
if (forward_core_heap.Size() > 0)
|
||||
{
|
||||
min_core_edge_offset = std::min(min_core_edge_offset, forward_core_heap.MinKey());
|
||||
}
|
||||
if (reverse_core_heap.Size() > 0 && reverse_core_heap.MinKey() < 0)
|
||||
{
|
||||
min_core_edge_offset = std::min(min_core_edge_offset, reverse_core_heap.MinKey());
|
||||
}
|
||||
BOOST_ASSERT(min_core_edge_offset <= 0);
|
||||
|
||||
// run two-target Dijkstra routing step on core with termination criterion
|
||||
const constexpr bool STALLING_DISABLED = false;
|
||||
while (0 < forward_core_heap.Size() && 0 < reverse_core_heap.Size() &&
|
||||
weight > (forward_core_heap.MinKey() + reverse_core_heap.MinKey()))
|
||||
{
|
||||
RoutingStep(facade,
|
||||
forward_core_heap,
|
||||
reverse_core_heap,
|
||||
middle,
|
||||
weight,
|
||||
min_core_edge_offset,
|
||||
true,
|
||||
STALLING_DISABLED,
|
||||
force_loop_forward,
|
||||
force_loop_reverse);
|
||||
|
||||
RoutingStep(facade,
|
||||
reverse_core_heap,
|
||||
forward_core_heap,
|
||||
middle,
|
||||
weight,
|
||||
min_core_edge_offset,
|
||||
false,
|
||||
STALLING_DISABLED,
|
||||
force_loop_reverse,
|
||||
force_loop_forward);
|
||||
}
|
||||
|
||||
// No path found for both target nodes?
|
||||
if (duration_upper_bound <= weight || SPECIAL_NODEID == middle)
|
||||
{
|
||||
weight = INVALID_EDGE_WEIGHT;
|
||||
return;
|
||||
}
|
||||
|
||||
// Was a paths over one of the forward/reverse nodes not found?
|
||||
BOOST_ASSERT_MSG((SPECIAL_NODEID != middle && INVALID_EDGE_WEIGHT != weight),
|
||||
"no path found");
|
||||
|
||||
// we need to unpack sub path from core heaps
|
||||
if (facade.IsCoreNode(middle))
|
||||
{
|
||||
if (weight != forward_core_heap.GetKey(middle) + reverse_core_heap.GetKey(middle))
|
||||
{
|
||||
// self loop
|
||||
BOOST_ASSERT(forward_core_heap.GetData(middle).parent == middle &&
|
||||
reverse_core_heap.GetData(middle).parent == middle);
|
||||
packed_leg.push_back(middle);
|
||||
packed_leg.push_back(middle);
|
||||
}
|
||||
else
|
||||
{
|
||||
std::vector<NodeID> packed_core_leg;
|
||||
RetrievePackedPathFromHeap(
|
||||
forward_core_heap, reverse_core_heap, middle, packed_core_leg);
|
||||
BOOST_ASSERT(packed_core_leg.size() > 0);
|
||||
RetrievePackedPathFromSingleHeap(forward_heap, packed_core_leg.front(), packed_leg);
|
||||
std::reverse(packed_leg.begin(), packed_leg.end());
|
||||
packed_leg.insert(packed_leg.end(), packed_core_leg.begin(), packed_core_leg.end());
|
||||
RetrievePackedPathFromSingleHeap(reverse_heap, packed_core_leg.back(), packed_leg);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (weight != forward_heap.GetKey(middle) + reverse_heap.GetKey(middle))
|
||||
{
|
||||
// self loop
|
||||
BOOST_ASSERT(forward_heap.GetData(middle).parent == middle &&
|
||||
reverse_heap.GetData(middle).parent == middle);
|
||||
packed_leg.push_back(middle);
|
||||
packed_leg.push_back(middle);
|
||||
}
|
||||
else
|
||||
{
|
||||
RetrievePackedPathFromHeap(forward_heap, reverse_heap, middle, packed_leg);
|
||||
}
|
||||
}
|
||||
}
|
||||
int duration_upper_bound = INVALID_EDGE_WEIGHT) const;
|
||||
|
||||
bool NeedsLoopForward(const PhantomNode &source_phantom,
|
||||
const PhantomNode &target_phantom) const
|
||||
{
|
||||
return source_phantom.forward_segment_id.enabled &&
|
||||
target_phantom.forward_segment_id.enabled &&
|
||||
source_phantom.forward_segment_id.id == target_phantom.forward_segment_id.id &&
|
||||
source_phantom.GetForwardWeightPlusOffset() >
|
||||
target_phantom.GetForwardWeightPlusOffset();
|
||||
}
|
||||
const PhantomNode &target_phantom) const;
|
||||
|
||||
bool NeedsLoopBackwards(const PhantomNode &source_phantom,
|
||||
const PhantomNode &target_phantom) const
|
||||
{
|
||||
return source_phantom.reverse_segment_id.enabled &&
|
||||
target_phantom.reverse_segment_id.enabled &&
|
||||
source_phantom.reverse_segment_id.id == target_phantom.reverse_segment_id.id &&
|
||||
source_phantom.GetReverseWeightPlusOffset() >
|
||||
target_phantom.GetReverseWeightPlusOffset();
|
||||
}
|
||||
const PhantomNode &target_phantom) const;
|
||||
|
||||
double GetPathDistance(const DataFacadeT &facade,
|
||||
double GetPathDistance(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
const std::vector<NodeID> &packed_path,
|
||||
const PhantomNode &source_phantom,
|
||||
const PhantomNode &target_phantom) const
|
||||
{
|
||||
std::vector<PathData> unpacked_path;
|
||||
PhantomNodes nodes;
|
||||
nodes.source_phantom = source_phantom;
|
||||
nodes.target_phantom = target_phantom;
|
||||
UnpackPath(facade, packed_path.begin(), packed_path.end(), nodes, unpacked_path);
|
||||
|
||||
using util::coordinate_calculation::detail::DEGREE_TO_RAD;
|
||||
using util::coordinate_calculation::detail::EARTH_RADIUS;
|
||||
|
||||
double distance = 0;
|
||||
double prev_lat =
|
||||
static_cast<double>(toFloating(source_phantom.location.lat)) * DEGREE_TO_RAD;
|
||||
double prev_lon =
|
||||
static_cast<double>(toFloating(source_phantom.location.lon)) * DEGREE_TO_RAD;
|
||||
double prev_cos = std::cos(prev_lat);
|
||||
for (const auto &p : unpacked_path)
|
||||
{
|
||||
const auto current_coordinate = facade.GetCoordinateOfNode(p.turn_via_node);
|
||||
|
||||
const double current_lat =
|
||||
static_cast<double>(toFloating(current_coordinate.lat)) * DEGREE_TO_RAD;
|
||||
const double current_lon =
|
||||
static_cast<double>(toFloating(current_coordinate.lon)) * DEGREE_TO_RAD;
|
||||
const double current_cos = std::cos(current_lat);
|
||||
|
||||
const double sin_dlon = std::sin((prev_lon - current_lon) / 2.0);
|
||||
const double sin_dlat = std::sin((prev_lat - current_lat) / 2.0);
|
||||
|
||||
const double aharv = sin_dlat * sin_dlat + prev_cos * current_cos * sin_dlon * sin_dlon;
|
||||
const double charv = 2. * std::atan2(std::sqrt(aharv), std::sqrt(1.0 - aharv));
|
||||
distance += EARTH_RADIUS * charv;
|
||||
|
||||
prev_lat = current_lat;
|
||||
prev_lon = current_lon;
|
||||
prev_cos = current_cos;
|
||||
}
|
||||
|
||||
const double current_lat =
|
||||
static_cast<double>(toFloating(target_phantom.location.lat)) * DEGREE_TO_RAD;
|
||||
const double current_lon =
|
||||
static_cast<double>(toFloating(target_phantom.location.lon)) * DEGREE_TO_RAD;
|
||||
const double current_cos = std::cos(current_lat);
|
||||
|
||||
const double sin_dlon = std::sin((prev_lon - current_lon) / 2.0);
|
||||
const double sin_dlat = std::sin((prev_lat - current_lat) / 2.0);
|
||||
|
||||
const double aharv = sin_dlat * sin_dlat + prev_cos * current_cos * sin_dlon * sin_dlon;
|
||||
const double charv = 2. * std::atan2(std::sqrt(aharv), std::sqrt(1.0 - aharv));
|
||||
distance += EARTH_RADIUS * charv;
|
||||
|
||||
return distance;
|
||||
}
|
||||
const PhantomNode &target_phantom) const;
|
||||
|
||||
// Requires the heaps for be empty
|
||||
// If heaps should be adjusted to be initialized outside of this function,
|
||||
// the addition of force_loop parameters might be required
|
||||
double GetNetworkDistanceWithCore(const DataFacadeT &facade,
|
||||
SearchEngineData::QueryHeap &forward_heap,
|
||||
SearchEngineData::QueryHeap &reverse_heap,
|
||||
SearchEngineData::QueryHeap &forward_core_heap,
|
||||
SearchEngineData::QueryHeap &reverse_core_heap,
|
||||
const PhantomNode &source_phantom,
|
||||
const PhantomNode &target_phantom,
|
||||
int duration_upper_bound = INVALID_EDGE_WEIGHT) const
|
||||
{
|
||||
BOOST_ASSERT(forward_heap.Empty());
|
||||
BOOST_ASSERT(reverse_heap.Empty());
|
||||
|
||||
if (source_phantom.forward_segment_id.enabled)
|
||||
{
|
||||
forward_heap.Insert(source_phantom.forward_segment_id.id,
|
||||
-source_phantom.GetForwardWeightPlusOffset(),
|
||||
source_phantom.forward_segment_id.id);
|
||||
}
|
||||
if (source_phantom.reverse_segment_id.enabled)
|
||||
{
|
||||
forward_heap.Insert(source_phantom.reverse_segment_id.id,
|
||||
-source_phantom.GetReverseWeightPlusOffset(),
|
||||
source_phantom.reverse_segment_id.id);
|
||||
}
|
||||
|
||||
if (target_phantom.forward_segment_id.enabled)
|
||||
{
|
||||
reverse_heap.Insert(target_phantom.forward_segment_id.id,
|
||||
target_phantom.GetForwardWeightPlusOffset(),
|
||||
target_phantom.forward_segment_id.id);
|
||||
}
|
||||
if (target_phantom.reverse_segment_id.enabled)
|
||||
{
|
||||
reverse_heap.Insert(target_phantom.reverse_segment_id.id,
|
||||
target_phantom.GetReverseWeightPlusOffset(),
|
||||
target_phantom.reverse_segment_id.id);
|
||||
}
|
||||
|
||||
const bool constexpr DO_NOT_FORCE_LOOPS =
|
||||
false; // prevents forcing of loops, since offsets are set correctly
|
||||
|
||||
int duration = INVALID_EDGE_WEIGHT;
|
||||
std::vector<NodeID> packed_path;
|
||||
SearchWithCore(facade,
|
||||
forward_heap,
|
||||
reverse_heap,
|
||||
forward_core_heap,
|
||||
reverse_core_heap,
|
||||
duration,
|
||||
packed_path,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
duration_upper_bound);
|
||||
|
||||
double distance = std::numeric_limits<double>::max();
|
||||
if (duration != INVALID_EDGE_WEIGHT)
|
||||
{
|
||||
return GetPathDistance(facade, packed_path, source_phantom, target_phantom);
|
||||
}
|
||||
return distance;
|
||||
}
|
||||
double
|
||||
GetNetworkDistanceWithCore(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
SearchEngineData::QueryHeap &forward_heap,
|
||||
SearchEngineData::QueryHeap &reverse_heap,
|
||||
SearchEngineData::QueryHeap &forward_core_heap,
|
||||
SearchEngineData::QueryHeap &reverse_core_heap,
|
||||
const PhantomNode &source_phantom,
|
||||
const PhantomNode &target_phantom,
|
||||
int duration_upper_bound = INVALID_EDGE_WEIGHT) const;
|
||||
|
||||
// Requires the heaps for be empty
|
||||
// If heaps should be adjusted to be initialized outside of this function,
|
||||
// the addition of force_loop parameters might be required
|
||||
double GetNetworkDistance(const DataFacadeT &facade,
|
||||
double GetNetworkDistance(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
SearchEngineData::QueryHeap &forward_heap,
|
||||
SearchEngineData::QueryHeap &reverse_heap,
|
||||
const PhantomNode &source_phantom,
|
||||
const PhantomNode &target_phantom,
|
||||
int duration_upper_bound = INVALID_EDGE_WEIGHT) const
|
||||
{
|
||||
BOOST_ASSERT(forward_heap.Empty());
|
||||
BOOST_ASSERT(reverse_heap.Empty());
|
||||
|
||||
if (source_phantom.forward_segment_id.enabled)
|
||||
{
|
||||
forward_heap.Insert(source_phantom.forward_segment_id.id,
|
||||
-source_phantom.GetForwardWeightPlusOffset(),
|
||||
source_phantom.forward_segment_id.id);
|
||||
}
|
||||
if (source_phantom.reverse_segment_id.enabled)
|
||||
{
|
||||
forward_heap.Insert(source_phantom.reverse_segment_id.id,
|
||||
-source_phantom.GetReverseWeightPlusOffset(),
|
||||
source_phantom.reverse_segment_id.id);
|
||||
}
|
||||
|
||||
if (target_phantom.forward_segment_id.enabled)
|
||||
{
|
||||
reverse_heap.Insert(target_phantom.forward_segment_id.id,
|
||||
target_phantom.GetForwardWeightPlusOffset(),
|
||||
target_phantom.forward_segment_id.id);
|
||||
}
|
||||
if (target_phantom.reverse_segment_id.enabled)
|
||||
{
|
||||
reverse_heap.Insert(target_phantom.reverse_segment_id.id,
|
||||
target_phantom.GetReverseWeightPlusOffset(),
|
||||
target_phantom.reverse_segment_id.id);
|
||||
}
|
||||
|
||||
const bool constexpr DO_NOT_FORCE_LOOPS =
|
||||
false; // prevents forcing of loops, since offsets are set correctly
|
||||
|
||||
int duration = INVALID_EDGE_WEIGHT;
|
||||
std::vector<NodeID> packed_path;
|
||||
Search(facade,
|
||||
forward_heap,
|
||||
reverse_heap,
|
||||
duration,
|
||||
packed_path,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
duration_upper_bound);
|
||||
|
||||
if (duration == INVALID_EDGE_WEIGHT)
|
||||
{
|
||||
return std::numeric_limits<double>::max();
|
||||
}
|
||||
|
||||
return GetPathDistance(facade, packed_path, source_phantom, target_phantom);
|
||||
}
|
||||
int duration_upper_bound = INVALID_EDGE_WEIGHT) const;
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace routing_algorithms
|
||||
} // namespace engine
|
||||
} // namespace osrm
|
||||
|
||||
#endif // ROUTING_BASE_HPP
|
||||
|
@ -5,11 +5,13 @@
|
||||
|
||||
#include "engine/routing_algorithms/routing_base.hpp"
|
||||
|
||||
#include "engine/datafacade/datafacade_base.hpp"
|
||||
#include "engine/search_engine_data.hpp"
|
||||
#include "util/integer_range.hpp"
|
||||
|
||||
#include <boost/assert.hpp>
|
||||
#include <boost/optional.hpp>
|
||||
#include <memory>
|
||||
|
||||
namespace osrm
|
||||
{
|
||||
@ -18,10 +20,9 @@ namespace engine
|
||||
namespace routing_algorithms
|
||||
{
|
||||
|
||||
template <class DataFacadeT>
|
||||
class ShortestPathRouting final : public BasicRoutingInterface<DataFacadeT>
|
||||
class ShortestPathRouting final : public BasicRoutingInterface
|
||||
{
|
||||
using super = BasicRoutingInterface<DataFacadeT>;
|
||||
using super = BasicRoutingInterface;
|
||||
using QueryHeap = SearchEngineData::QueryHeap;
|
||||
SearchEngineData &engine_working_data;
|
||||
const static constexpr bool DO_NOT_FORCE_LOOP = false;
|
||||
@ -36,7 +37,7 @@ class ShortestPathRouting final : public BasicRoutingInterface<DataFacadeT>
|
||||
|
||||
// allows a uturn at the target_phantom
|
||||
// searches source forward/reverse -> target forward/reverse
|
||||
void SearchWithUTurn(const DataFacadeT &facade,
|
||||
void SearchWithUTurn(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
QueryHeap &forward_heap,
|
||||
QueryHeap &reverse_heap,
|
||||
QueryHeap &forward_core_heap,
|
||||
@ -50,83 +51,12 @@ class ShortestPathRouting final : public BasicRoutingInterface<DataFacadeT>
|
||||
const int total_weight_to_forward,
|
||||
const int total_weight_to_reverse,
|
||||
int &new_total_weight,
|
||||
std::vector<NodeID> &leg_packed_path) const
|
||||
{
|
||||
forward_heap.Clear();
|
||||
reverse_heap.Clear();
|
||||
if (search_from_forward_node)
|
||||
{
|
||||
forward_heap.Insert(source_phantom.forward_segment_id.id,
|
||||
-source_phantom.GetForwardWeightPlusOffset(),
|
||||
source_phantom.forward_segment_id.id);
|
||||
}
|
||||
if (search_from_reverse_node)
|
||||
{
|
||||
forward_heap.Insert(source_phantom.reverse_segment_id.id,
|
||||
-source_phantom.GetReverseWeightPlusOffset(),
|
||||
source_phantom.reverse_segment_id.id);
|
||||
}
|
||||
if (search_to_forward_node)
|
||||
{
|
||||
reverse_heap.Insert(target_phantom.forward_segment_id.id,
|
||||
target_phantom.GetForwardWeightPlusOffset(),
|
||||
target_phantom.forward_segment_id.id);
|
||||
}
|
||||
if (search_to_reverse_node)
|
||||
{
|
||||
reverse_heap.Insert(target_phantom.reverse_segment_id.id,
|
||||
target_phantom.GetReverseWeightPlusOffset(),
|
||||
target_phantom.reverse_segment_id.id);
|
||||
}
|
||||
|
||||
BOOST_ASSERT(forward_heap.Size() > 0);
|
||||
BOOST_ASSERT(reverse_heap.Size() > 0);
|
||||
|
||||
// this is only relevent if source and target are on the same compressed edge
|
||||
auto is_oneway_source = !(search_from_forward_node && search_from_reverse_node);
|
||||
auto is_oneway_target = !(search_to_forward_node && search_to_reverse_node);
|
||||
// we only enable loops here if we can't search from forward to backward node
|
||||
auto needs_loop_forwad =
|
||||
is_oneway_source && super::NeedsLoopForward(source_phantom, target_phantom);
|
||||
auto needs_loop_backwards =
|
||||
is_oneway_target && super::NeedsLoopBackwards(source_phantom, target_phantom);
|
||||
if (facade.GetCoreSize() > 0)
|
||||
{
|
||||
forward_core_heap.Clear();
|
||||
reverse_core_heap.Clear();
|
||||
BOOST_ASSERT(forward_core_heap.Size() == 0);
|
||||
BOOST_ASSERT(reverse_core_heap.Size() == 0);
|
||||
super::SearchWithCore(facade,
|
||||
forward_heap,
|
||||
reverse_heap,
|
||||
forward_core_heap,
|
||||
reverse_core_heap,
|
||||
new_total_weight,
|
||||
leg_packed_path,
|
||||
needs_loop_forwad,
|
||||
needs_loop_backwards);
|
||||
}
|
||||
else
|
||||
{
|
||||
super::Search(facade,
|
||||
forward_heap,
|
||||
reverse_heap,
|
||||
new_total_weight,
|
||||
leg_packed_path,
|
||||
needs_loop_forwad,
|
||||
needs_loop_backwards);
|
||||
}
|
||||
// if no route is found between two parts of the via-route, the entire route becomes
|
||||
// invalid. Adding to invalid edge weight sadly doesn't return an invalid edge weight. Here
|
||||
// we prevent the possible overflow, faking the addition of infinity + x == infinity
|
||||
if (new_total_weight != INVALID_EDGE_WEIGHT)
|
||||
new_total_weight += std::min(total_weight_to_forward, total_weight_to_reverse);
|
||||
}
|
||||
std::vector<NodeID> &leg_packed_path) const;
|
||||
|
||||
// searches shortest path between:
|
||||
// source forward/reverse -> target forward
|
||||
// source forward/reverse -> target reverse
|
||||
void Search(const DataFacadeT &facade,
|
||||
void Search(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
QueryHeap &forward_heap,
|
||||
QueryHeap &reverse_heap,
|
||||
QueryHeap &forward_core_heap,
|
||||
@ -142,387 +72,22 @@ class ShortestPathRouting final : public BasicRoutingInterface<DataFacadeT>
|
||||
int &new_total_weight_to_forward,
|
||||
int &new_total_weight_to_reverse,
|
||||
std::vector<NodeID> &leg_packed_path_forward,
|
||||
std::vector<NodeID> &leg_packed_path_reverse) const
|
||||
{
|
||||
if (search_to_forward_node)
|
||||
{
|
||||
forward_heap.Clear();
|
||||
reverse_heap.Clear();
|
||||
reverse_heap.Insert(target_phantom.forward_segment_id.id,
|
||||
target_phantom.GetForwardWeightPlusOffset(),
|
||||
target_phantom.forward_segment_id.id);
|
||||
std::vector<NodeID> &leg_packed_path_reverse) const;
|
||||
|
||||
if (search_from_forward_node)
|
||||
{
|
||||
forward_heap.Insert(source_phantom.forward_segment_id.id,
|
||||
total_weight_to_forward -
|
||||
source_phantom.GetForwardWeightPlusOffset(),
|
||||
source_phantom.forward_segment_id.id);
|
||||
}
|
||||
if (search_from_reverse_node)
|
||||
{
|
||||
forward_heap.Insert(source_phantom.reverse_segment_id.id,
|
||||
total_weight_to_reverse -
|
||||
source_phantom.GetReverseWeightPlusOffset(),
|
||||
source_phantom.reverse_segment_id.id);
|
||||
}
|
||||
BOOST_ASSERT(forward_heap.Size() > 0);
|
||||
BOOST_ASSERT(reverse_heap.Size() > 0);
|
||||
|
||||
if (facade.GetCoreSize() > 0)
|
||||
{
|
||||
forward_core_heap.Clear();
|
||||
reverse_core_heap.Clear();
|
||||
BOOST_ASSERT(forward_core_heap.Size() == 0);
|
||||
BOOST_ASSERT(reverse_core_heap.Size() == 0);
|
||||
super::SearchWithCore(facade,
|
||||
forward_heap,
|
||||
reverse_heap,
|
||||
forward_core_heap,
|
||||
reverse_core_heap,
|
||||
new_total_weight_to_forward,
|
||||
leg_packed_path_forward,
|
||||
super::NeedsLoopForward(source_phantom, target_phantom),
|
||||
DO_NOT_FORCE_LOOP);
|
||||
}
|
||||
else
|
||||
{
|
||||
super::Search(facade,
|
||||
forward_heap,
|
||||
reverse_heap,
|
||||
new_total_weight_to_forward,
|
||||
leg_packed_path_forward,
|
||||
super::NeedsLoopForward(source_phantom, target_phantom),
|
||||
DO_NOT_FORCE_LOOP);
|
||||
}
|
||||
}
|
||||
|
||||
if (search_to_reverse_node)
|
||||
{
|
||||
forward_heap.Clear();
|
||||
reverse_heap.Clear();
|
||||
reverse_heap.Insert(target_phantom.reverse_segment_id.id,
|
||||
target_phantom.GetReverseWeightPlusOffset(),
|
||||
target_phantom.reverse_segment_id.id);
|
||||
if (search_from_forward_node)
|
||||
{
|
||||
forward_heap.Insert(source_phantom.forward_segment_id.id,
|
||||
total_weight_to_forward -
|
||||
source_phantom.GetForwardWeightPlusOffset(),
|
||||
source_phantom.forward_segment_id.id);
|
||||
}
|
||||
if (search_from_reverse_node)
|
||||
{
|
||||
forward_heap.Insert(source_phantom.reverse_segment_id.id,
|
||||
total_weight_to_reverse -
|
||||
source_phantom.GetReverseWeightPlusOffset(),
|
||||
source_phantom.reverse_segment_id.id);
|
||||
}
|
||||
BOOST_ASSERT(forward_heap.Size() > 0);
|
||||
BOOST_ASSERT(reverse_heap.Size() > 0);
|
||||
if (facade.GetCoreSize() > 0)
|
||||
{
|
||||
forward_core_heap.Clear();
|
||||
reverse_core_heap.Clear();
|
||||
BOOST_ASSERT(forward_core_heap.Size() == 0);
|
||||
BOOST_ASSERT(reverse_core_heap.Size() == 0);
|
||||
super::SearchWithCore(facade,
|
||||
forward_heap,
|
||||
reverse_heap,
|
||||
forward_core_heap,
|
||||
reverse_core_heap,
|
||||
new_total_weight_to_reverse,
|
||||
leg_packed_path_reverse,
|
||||
DO_NOT_FORCE_LOOP,
|
||||
super::NeedsLoopBackwards(source_phantom, target_phantom));
|
||||
}
|
||||
else
|
||||
{
|
||||
super::Search(facade,
|
||||
forward_heap,
|
||||
reverse_heap,
|
||||
new_total_weight_to_reverse,
|
||||
leg_packed_path_reverse,
|
||||
DO_NOT_FORCE_LOOP,
|
||||
super::NeedsLoopBackwards(source_phantom, target_phantom));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void UnpackLegs(const DataFacadeT &facade,
|
||||
void UnpackLegs(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
const std::vector<PhantomNodes> &phantom_nodes_vector,
|
||||
const std::vector<NodeID> &total_packed_path,
|
||||
const std::vector<std::size_t> &packed_leg_begin,
|
||||
const int shortest_path_length,
|
||||
InternalRouteResult &raw_route_data) const
|
||||
{
|
||||
raw_route_data.unpacked_path_segments.resize(packed_leg_begin.size() - 1);
|
||||
InternalRouteResult &raw_route_data) const;
|
||||
|
||||
raw_route_data.shortest_path_length = shortest_path_length;
|
||||
|
||||
for (const auto current_leg : util::irange<std::size_t>(0UL, packed_leg_begin.size() - 1))
|
||||
{
|
||||
auto leg_begin = total_packed_path.begin() + packed_leg_begin[current_leg];
|
||||
auto leg_end = total_packed_path.begin() + packed_leg_begin[current_leg + 1];
|
||||
const auto &unpack_phantom_node_pair = phantom_nodes_vector[current_leg];
|
||||
super::UnpackPath(facade,
|
||||
leg_begin,
|
||||
leg_end,
|
||||
unpack_phantom_node_pair,
|
||||
raw_route_data.unpacked_path_segments[current_leg]);
|
||||
|
||||
raw_route_data.source_traversed_in_reverse.push_back(
|
||||
(*leg_begin !=
|
||||
phantom_nodes_vector[current_leg].source_phantom.forward_segment_id.id));
|
||||
raw_route_data.target_traversed_in_reverse.push_back(
|
||||
(*std::prev(leg_end) !=
|
||||
phantom_nodes_vector[current_leg].target_phantom.forward_segment_id.id));
|
||||
}
|
||||
}
|
||||
|
||||
void operator()(const DataFacadeT &facade,
|
||||
void operator()(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
const std::vector<PhantomNodes> &phantom_nodes_vector,
|
||||
const boost::optional<bool> continue_straight_at_waypoint,
|
||||
InternalRouteResult &raw_route_data) const
|
||||
{
|
||||
const bool allow_uturn_at_waypoint =
|
||||
!(continue_straight_at_waypoint ? *continue_straight_at_waypoint
|
||||
: facade.GetContinueStraightDefault());
|
||||
|
||||
engine_working_data.InitializeOrClearFirstThreadLocalStorage(facade.GetNumberOfNodes());
|
||||
engine_working_data.InitializeOrClearSecondThreadLocalStorage(facade.GetNumberOfNodes());
|
||||
|
||||
QueryHeap &forward_heap = *(engine_working_data.forward_heap_1);
|
||||
QueryHeap &reverse_heap = *(engine_working_data.reverse_heap_1);
|
||||
QueryHeap &forward_core_heap = *(engine_working_data.forward_heap_2);
|
||||
QueryHeap &reverse_core_heap = *(engine_working_data.reverse_heap_2);
|
||||
|
||||
int total_weight_to_forward = 0;
|
||||
int total_weight_to_reverse = 0;
|
||||
bool search_from_forward_node =
|
||||
phantom_nodes_vector.front().source_phantom.forward_segment_id.enabled;
|
||||
bool search_from_reverse_node =
|
||||
phantom_nodes_vector.front().source_phantom.reverse_segment_id.enabled;
|
||||
|
||||
std::vector<NodeID> prev_packed_leg_to_forward;
|
||||
std::vector<NodeID> prev_packed_leg_to_reverse;
|
||||
|
||||
std::vector<NodeID> total_packed_path_to_forward;
|
||||
std::vector<std::size_t> packed_leg_to_forward_begin;
|
||||
std::vector<NodeID> total_packed_path_to_reverse;
|
||||
std::vector<std::size_t> packed_leg_to_reverse_begin;
|
||||
|
||||
std::size_t current_leg = 0;
|
||||
// this implements a dynamic program that finds the shortest route through
|
||||
// a list of vias
|
||||
for (const auto &phantom_node_pair : phantom_nodes_vector)
|
||||
{
|
||||
int new_total_weight_to_forward = INVALID_EDGE_WEIGHT;
|
||||
int new_total_weight_to_reverse = INVALID_EDGE_WEIGHT;
|
||||
|
||||
std::vector<NodeID> packed_leg_to_forward;
|
||||
std::vector<NodeID> packed_leg_to_reverse;
|
||||
|
||||
const auto &source_phantom = phantom_node_pair.source_phantom;
|
||||
const auto &target_phantom = phantom_node_pair.target_phantom;
|
||||
|
||||
bool search_to_forward_node = target_phantom.forward_segment_id.enabled;
|
||||
bool search_to_reverse_node = target_phantom.reverse_segment_id.enabled;
|
||||
|
||||
BOOST_ASSERT(!search_from_forward_node || source_phantom.forward_segment_id.enabled);
|
||||
BOOST_ASSERT(!search_from_reverse_node || source_phantom.reverse_segment_id.enabled);
|
||||
|
||||
BOOST_ASSERT(search_from_forward_node || search_from_reverse_node);
|
||||
|
||||
if (search_to_reverse_node || search_to_forward_node)
|
||||
{
|
||||
if (allow_uturn_at_waypoint)
|
||||
{
|
||||
SearchWithUTurn(facade,
|
||||
forward_heap,
|
||||
reverse_heap,
|
||||
forward_core_heap,
|
||||
reverse_core_heap,
|
||||
search_from_forward_node,
|
||||
search_from_reverse_node,
|
||||
search_to_forward_node,
|
||||
search_to_reverse_node,
|
||||
source_phantom,
|
||||
target_phantom,
|
||||
total_weight_to_forward,
|
||||
total_weight_to_reverse,
|
||||
new_total_weight_to_forward,
|
||||
packed_leg_to_forward);
|
||||
// if only the reverse node is valid (e.g. when using the match plugin) we
|
||||
// actually need to move
|
||||
if (!target_phantom.forward_segment_id.enabled)
|
||||
{
|
||||
BOOST_ASSERT(target_phantom.reverse_segment_id.enabled);
|
||||
new_total_weight_to_reverse = new_total_weight_to_forward;
|
||||
packed_leg_to_reverse = std::move(packed_leg_to_forward);
|
||||
new_total_weight_to_forward = INVALID_EDGE_WEIGHT;
|
||||
}
|
||||
else if (target_phantom.reverse_segment_id.enabled)
|
||||
{
|
||||
new_total_weight_to_reverse = new_total_weight_to_forward;
|
||||
packed_leg_to_reverse = packed_leg_to_forward;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
Search(facade,
|
||||
forward_heap,
|
||||
reverse_heap,
|
||||
forward_core_heap,
|
||||
reverse_core_heap,
|
||||
search_from_forward_node,
|
||||
search_from_reverse_node,
|
||||
search_to_forward_node,
|
||||
search_to_reverse_node,
|
||||
source_phantom,
|
||||
target_phantom,
|
||||
total_weight_to_forward,
|
||||
total_weight_to_reverse,
|
||||
new_total_weight_to_forward,
|
||||
new_total_weight_to_reverse,
|
||||
packed_leg_to_forward,
|
||||
packed_leg_to_reverse);
|
||||
}
|
||||
}
|
||||
|
||||
// No path found for both target nodes?
|
||||
if ((INVALID_EDGE_WEIGHT == new_total_weight_to_forward) &&
|
||||
(INVALID_EDGE_WEIGHT == new_total_weight_to_reverse))
|
||||
{
|
||||
raw_route_data.shortest_path_length = INVALID_EDGE_WEIGHT;
|
||||
raw_route_data.alternative_path_length = INVALID_EDGE_WEIGHT;
|
||||
return;
|
||||
}
|
||||
|
||||
// we need to figure out how the new legs connect to the previous ones
|
||||
if (current_leg > 0)
|
||||
{
|
||||
bool forward_to_forward =
|
||||
(new_total_weight_to_forward != INVALID_EDGE_WEIGHT) &&
|
||||
packed_leg_to_forward.front() == source_phantom.forward_segment_id.id;
|
||||
bool reverse_to_forward =
|
||||
(new_total_weight_to_forward != INVALID_EDGE_WEIGHT) &&
|
||||
packed_leg_to_forward.front() == source_phantom.reverse_segment_id.id;
|
||||
bool forward_to_reverse =
|
||||
(new_total_weight_to_reverse != INVALID_EDGE_WEIGHT) &&
|
||||
packed_leg_to_reverse.front() == source_phantom.forward_segment_id.id;
|
||||
bool reverse_to_reverse =
|
||||
(new_total_weight_to_reverse != INVALID_EDGE_WEIGHT) &&
|
||||
packed_leg_to_reverse.front() == source_phantom.reverse_segment_id.id;
|
||||
|
||||
BOOST_ASSERT(!forward_to_forward || !reverse_to_forward);
|
||||
BOOST_ASSERT(!forward_to_reverse || !reverse_to_reverse);
|
||||
|
||||
// in this case we always need to copy
|
||||
if (forward_to_forward && forward_to_reverse)
|
||||
{
|
||||
// in this case we copy the path leading to the source forward node
|
||||
// and change the case
|
||||
total_packed_path_to_reverse = total_packed_path_to_forward;
|
||||
packed_leg_to_reverse_begin = packed_leg_to_forward_begin;
|
||||
forward_to_reverse = false;
|
||||
reverse_to_reverse = true;
|
||||
}
|
||||
else if (reverse_to_forward && reverse_to_reverse)
|
||||
{
|
||||
total_packed_path_to_forward = total_packed_path_to_reverse;
|
||||
packed_leg_to_forward_begin = packed_leg_to_reverse_begin;
|
||||
reverse_to_forward = false;
|
||||
forward_to_forward = true;
|
||||
}
|
||||
BOOST_ASSERT(!forward_to_forward || !forward_to_reverse);
|
||||
BOOST_ASSERT(!reverse_to_forward || !reverse_to_reverse);
|
||||
|
||||
// in this case we just need to swap to regain the correct mapping
|
||||
if (reverse_to_forward || forward_to_reverse)
|
||||
{
|
||||
total_packed_path_to_forward.swap(total_packed_path_to_reverse);
|
||||
packed_leg_to_forward_begin.swap(packed_leg_to_reverse_begin);
|
||||
}
|
||||
}
|
||||
|
||||
if (new_total_weight_to_forward != INVALID_EDGE_WEIGHT)
|
||||
{
|
||||
BOOST_ASSERT(target_phantom.forward_segment_id.enabled);
|
||||
|
||||
packed_leg_to_forward_begin.push_back(total_packed_path_to_forward.size());
|
||||
total_packed_path_to_forward.insert(total_packed_path_to_forward.end(),
|
||||
packed_leg_to_forward.begin(),
|
||||
packed_leg_to_forward.end());
|
||||
search_from_forward_node = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
total_packed_path_to_forward.clear();
|
||||
packed_leg_to_forward_begin.clear();
|
||||
search_from_forward_node = false;
|
||||
}
|
||||
|
||||
if (new_total_weight_to_reverse != INVALID_EDGE_WEIGHT)
|
||||
{
|
||||
BOOST_ASSERT(target_phantom.reverse_segment_id.enabled);
|
||||
|
||||
packed_leg_to_reverse_begin.push_back(total_packed_path_to_reverse.size());
|
||||
total_packed_path_to_reverse.insert(total_packed_path_to_reverse.end(),
|
||||
packed_leg_to_reverse.begin(),
|
||||
packed_leg_to_reverse.end());
|
||||
search_from_reverse_node = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
total_packed_path_to_reverse.clear();
|
||||
packed_leg_to_reverse_begin.clear();
|
||||
search_from_reverse_node = false;
|
||||
}
|
||||
|
||||
prev_packed_leg_to_forward = std::move(packed_leg_to_forward);
|
||||
prev_packed_leg_to_reverse = std::move(packed_leg_to_reverse);
|
||||
|
||||
total_weight_to_forward = new_total_weight_to_forward;
|
||||
total_weight_to_reverse = new_total_weight_to_reverse;
|
||||
|
||||
++current_leg;
|
||||
}
|
||||
|
||||
BOOST_ASSERT(total_weight_to_forward != INVALID_EDGE_WEIGHT ||
|
||||
total_weight_to_reverse != INVALID_EDGE_WEIGHT);
|
||||
|
||||
// We make sure the fastest route is always in packed_legs_to_forward
|
||||
if (total_weight_to_forward > total_weight_to_reverse)
|
||||
{
|
||||
// insert sentinel
|
||||
packed_leg_to_reverse_begin.push_back(total_packed_path_to_reverse.size());
|
||||
BOOST_ASSERT(packed_leg_to_reverse_begin.size() == phantom_nodes_vector.size() + 1);
|
||||
|
||||
UnpackLegs(facade,
|
||||
phantom_nodes_vector,
|
||||
total_packed_path_to_reverse,
|
||||
packed_leg_to_reverse_begin,
|
||||
total_weight_to_reverse,
|
||||
raw_route_data);
|
||||
}
|
||||
else
|
||||
{
|
||||
// insert sentinel
|
||||
packed_leg_to_forward_begin.push_back(total_packed_path_to_forward.size());
|
||||
BOOST_ASSERT(packed_leg_to_forward_begin.size() == phantom_nodes_vector.size() + 1);
|
||||
|
||||
UnpackLegs(facade,
|
||||
phantom_nodes_vector,
|
||||
total_packed_path_to_forward,
|
||||
packed_leg_to_forward_begin,
|
||||
total_weight_to_forward,
|
||||
raw_route_data);
|
||||
}
|
||||
}
|
||||
InternalRouteResult &raw_route_data) const;
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
} // namespace routing_algorithms
|
||||
} // namespace engine
|
||||
} // namespace osrm
|
||||
|
||||
#endif /* SHORTEST_PATH_HPP */
|
||||
|
@ -180,7 +180,7 @@ Status MatchPlugin::HandleRequest(const std::shared_ptr<const datafacade::BaseDa
|
||||
}
|
||||
|
||||
// call the actual map matching
|
||||
SubMatchingList sub_matchings = map_matching(*facade,
|
||||
SubMatchingList sub_matchings = map_matching(facade,
|
||||
candidates_lists,
|
||||
parameters.coordinates,
|
||||
parameters.timestamps,
|
||||
@ -211,7 +211,7 @@ Status MatchPlugin::HandleRequest(const std::shared_ptr<const datafacade::BaseDa
|
||||
// bi-directional
|
||||
// phantom nodes for possible uturns
|
||||
shortest_path(
|
||||
*facade, sub_routes[index].segment_end_coordinates, {false}, sub_routes[index]);
|
||||
facade, sub_routes[index].segment_end_coordinates, {false}, sub_routes[index]);
|
||||
BOOST_ASSERT(sub_routes[index].shortest_path_length != INVALID_EDGE_WEIGHT);
|
||||
}
|
||||
|
||||
|
@ -61,7 +61,7 @@ Status TablePlugin::HandleRequest(const std::shared_ptr<const datafacade::BaseDa
|
||||
|
||||
auto snapped_phantoms = SnapPhantomNodes(GetPhantomNodes(*facade, params));
|
||||
auto result_table =
|
||||
distance_table(*facade, snapped_phantoms, params.sources, params.destinations);
|
||||
distance_table(facade, snapped_phantoms, params.sources, params.destinations);
|
||||
|
||||
if (result_table.empty())
|
||||
{
|
||||
|
@ -114,9 +114,10 @@ SCC_Component SplitUnaccessibleLocations(const std::size_t number_of_locations,
|
||||
return SCC_Component(std::move(components), std::move(range));
|
||||
}
|
||||
|
||||
InternalRouteResult TripPlugin::ComputeRoute(const datafacade::BaseDataFacade &facade,
|
||||
const std::vector<PhantomNode> &snapped_phantoms,
|
||||
const std::vector<NodeID> &trip) const
|
||||
InternalRouteResult
|
||||
TripPlugin::ComputeRoute(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
const std::vector<PhantomNode> &snapped_phantoms,
|
||||
const std::vector<NodeID> &trip) const
|
||||
{
|
||||
InternalRouteResult min_route;
|
||||
// given he final trip, compute total duration and return the route and location permutation
|
||||
@ -175,7 +176,7 @@ Status TripPlugin::HandleRequest(const std::shared_ptr<const datafacade::BaseDat
|
||||
|
||||
// compute the duration table of all phantom nodes
|
||||
const auto result_table = util::DistTableWrapper<EdgeWeight>(
|
||||
duration_table(*facade, snapped_phantoms, {}, {}), number_of_locations);
|
||||
duration_table(facade, snapped_phantoms, {}, {}), number_of_locations);
|
||||
|
||||
if (result_table.size() == 0)
|
||||
{
|
||||
@ -233,7 +234,7 @@ Status TripPlugin::HandleRequest(const std::shared_ptr<const datafacade::BaseDat
|
||||
routes.reserve(trips.size());
|
||||
for (const auto &trip : trips)
|
||||
{
|
||||
routes.push_back(ComputeRoute(*facade, snapped_phantoms, trip));
|
||||
routes.push_back(ComputeRoute(facade, snapped_phantoms, trip));
|
||||
}
|
||||
|
||||
api::TripAPI trip_api{*facade, parameters};
|
||||
|
@ -88,16 +88,16 @@ Status ViaRoutePlugin::HandleRequest(const std::shared_ptr<const datafacade::Bas
|
||||
{
|
||||
if (route_parameters.alternatives && facade->GetCoreSize() == 0)
|
||||
{
|
||||
alternative_path(*facade, raw_route.segment_end_coordinates.front(), raw_route);
|
||||
alternative_path(facade, raw_route.segment_end_coordinates.front(), raw_route);
|
||||
}
|
||||
else
|
||||
{
|
||||
direct_shortest_path(*facade, raw_route.segment_end_coordinates, raw_route);
|
||||
direct_shortest_path(facade, raw_route.segment_end_coordinates, raw_route);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
shortest_path(*facade,
|
||||
shortest_path(facade,
|
||||
raw_route.segment_end_coordinates,
|
||||
route_parameters.continue_straight,
|
||||
raw_route);
|
||||
|
770
src/engine/routing_algorithms/alternative_path.cpp
Normal file
770
src/engine/routing_algorithms/alternative_path.cpp
Normal file
@ -0,0 +1,770 @@
|
||||
#include "engine/routing_algorithms/alternative_path.hpp"
|
||||
|
||||
namespace osrm
|
||||
{
|
||||
namespace engine
|
||||
{
|
||||
namespace routing_algorithms
|
||||
{
|
||||
|
||||
void AlternativeRouting::operator()(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
const PhantomNodes &phantom_node_pair,
|
||||
InternalRouteResult &raw_route_data)
|
||||
{
|
||||
std::vector<NodeID> alternative_path;
|
||||
std::vector<NodeID> via_node_candidate_list;
|
||||
std::vector<SearchSpaceEdge> forward_search_space;
|
||||
std::vector<SearchSpaceEdge> reverse_search_space;
|
||||
|
||||
// Init queues, semi-expensive because access to TSS invokes a sys-call
|
||||
engine_working_data.InitializeOrClearFirstThreadLocalStorage(facade->GetNumberOfNodes());
|
||||
engine_working_data.InitializeOrClearSecondThreadLocalStorage(facade->GetNumberOfNodes());
|
||||
engine_working_data.InitializeOrClearThirdThreadLocalStorage(facade->GetNumberOfNodes());
|
||||
|
||||
QueryHeap &forward_heap1 = *(engine_working_data.forward_heap_1);
|
||||
QueryHeap &reverse_heap1 = *(engine_working_data.reverse_heap_1);
|
||||
QueryHeap &forward_heap2 = *(engine_working_data.forward_heap_2);
|
||||
QueryHeap &reverse_heap2 = *(engine_working_data.reverse_heap_2);
|
||||
|
||||
int upper_bound_to_shortest_path_weight = INVALID_EDGE_WEIGHT;
|
||||
NodeID middle_node = SPECIAL_NODEID;
|
||||
const EdgeWeight min_edge_offset =
|
||||
std::min(phantom_node_pair.source_phantom.forward_segment_id.enabled
|
||||
? -phantom_node_pair.source_phantom.GetForwardWeightPlusOffset()
|
||||
: 0,
|
||||
phantom_node_pair.source_phantom.reverse_segment_id.enabled
|
||||
? -phantom_node_pair.source_phantom.GetReverseWeightPlusOffset()
|
||||
: 0);
|
||||
|
||||
if (phantom_node_pair.source_phantom.forward_segment_id.enabled)
|
||||
{
|
||||
BOOST_ASSERT(phantom_node_pair.source_phantom.forward_segment_id.id != SPECIAL_SEGMENTID);
|
||||
forward_heap1.Insert(phantom_node_pair.source_phantom.forward_segment_id.id,
|
||||
-phantom_node_pair.source_phantom.GetForwardWeightPlusOffset(),
|
||||
phantom_node_pair.source_phantom.forward_segment_id.id);
|
||||
}
|
||||
if (phantom_node_pair.source_phantom.reverse_segment_id.enabled)
|
||||
{
|
||||
BOOST_ASSERT(phantom_node_pair.source_phantom.reverse_segment_id.id != SPECIAL_SEGMENTID);
|
||||
forward_heap1.Insert(phantom_node_pair.source_phantom.reverse_segment_id.id,
|
||||
-phantom_node_pair.source_phantom.GetReverseWeightPlusOffset(),
|
||||
phantom_node_pair.source_phantom.reverse_segment_id.id);
|
||||
}
|
||||
|
||||
if (phantom_node_pair.target_phantom.forward_segment_id.enabled)
|
||||
{
|
||||
BOOST_ASSERT(phantom_node_pair.target_phantom.forward_segment_id.id != SPECIAL_SEGMENTID);
|
||||
reverse_heap1.Insert(phantom_node_pair.target_phantom.forward_segment_id.id,
|
||||
phantom_node_pair.target_phantom.GetForwardWeightPlusOffset(),
|
||||
phantom_node_pair.target_phantom.forward_segment_id.id);
|
||||
}
|
||||
if (phantom_node_pair.target_phantom.reverse_segment_id.enabled)
|
||||
{
|
||||
BOOST_ASSERT(phantom_node_pair.target_phantom.reverse_segment_id.id != SPECIAL_SEGMENTID);
|
||||
reverse_heap1.Insert(phantom_node_pair.target_phantom.reverse_segment_id.id,
|
||||
phantom_node_pair.target_phantom.GetReverseWeightPlusOffset(),
|
||||
phantom_node_pair.target_phantom.reverse_segment_id.id);
|
||||
}
|
||||
|
||||
// search from s and t till new_min/(1+epsilon) > length_of_shortest_path
|
||||
while (0 < (forward_heap1.Size() + reverse_heap1.Size()))
|
||||
{
|
||||
if (0 < forward_heap1.Size())
|
||||
{
|
||||
AlternativeRoutingStep<true>(facade,
|
||||
forward_heap1,
|
||||
reverse_heap1,
|
||||
&middle_node,
|
||||
&upper_bound_to_shortest_path_weight,
|
||||
via_node_candidate_list,
|
||||
forward_search_space,
|
||||
min_edge_offset);
|
||||
}
|
||||
if (0 < reverse_heap1.Size())
|
||||
{
|
||||
AlternativeRoutingStep<false>(facade,
|
||||
forward_heap1,
|
||||
reverse_heap1,
|
||||
&middle_node,
|
||||
&upper_bound_to_shortest_path_weight,
|
||||
via_node_candidate_list,
|
||||
reverse_search_space,
|
||||
min_edge_offset);
|
||||
}
|
||||
}
|
||||
|
||||
if (INVALID_EDGE_WEIGHT == upper_bound_to_shortest_path_weight)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
std::sort(begin(via_node_candidate_list), end(via_node_candidate_list));
|
||||
auto unique_end = std::unique(begin(via_node_candidate_list), end(via_node_candidate_list));
|
||||
via_node_candidate_list.resize(unique_end - begin(via_node_candidate_list));
|
||||
|
||||
std::vector<NodeID> packed_forward_path;
|
||||
std::vector<NodeID> packed_reverse_path;
|
||||
|
||||
const bool path_is_a_loop =
|
||||
upper_bound_to_shortest_path_weight !=
|
||||
forward_heap1.GetKey(middle_node) + reverse_heap1.GetKey(middle_node);
|
||||
if (path_is_a_loop)
|
||||
{
|
||||
// Self Loop
|
||||
packed_forward_path.push_back(middle_node);
|
||||
packed_forward_path.push_back(middle_node);
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
super::RetrievePackedPathFromSingleHeap(forward_heap1, middle_node, packed_forward_path);
|
||||
super::RetrievePackedPathFromSingleHeap(reverse_heap1, middle_node, packed_reverse_path);
|
||||
}
|
||||
|
||||
// this set is is used as an indicator if a node is on the shortest path
|
||||
std::unordered_set<NodeID> nodes_in_path(packed_forward_path.size() +
|
||||
packed_reverse_path.size());
|
||||
nodes_in_path.insert(packed_forward_path.begin(), packed_forward_path.end());
|
||||
nodes_in_path.insert(middle_node);
|
||||
nodes_in_path.insert(packed_reverse_path.begin(), packed_reverse_path.end());
|
||||
|
||||
std::unordered_map<NodeID, int> approximated_forward_sharing;
|
||||
std::unordered_map<NodeID, int> approximated_reverse_sharing;
|
||||
|
||||
// sweep over search space, compute forward sharing for each current edge (u,v)
|
||||
for (const SearchSpaceEdge ¤t_edge : forward_search_space)
|
||||
{
|
||||
const NodeID u = current_edge.first;
|
||||
const NodeID v = current_edge.second;
|
||||
|
||||
if (nodes_in_path.find(v) != nodes_in_path.end())
|
||||
{
|
||||
// current_edge is on shortest path => sharing(v):=queue.GetKey(v);
|
||||
approximated_forward_sharing.emplace(v, forward_heap1.GetKey(v));
|
||||
}
|
||||
else
|
||||
{
|
||||
// current edge is not on shortest path. Check if we know a value for the other
|
||||
// endpoint
|
||||
const auto sharing_of_u_iterator = approximated_forward_sharing.find(u);
|
||||
if (sharing_of_u_iterator != approximated_forward_sharing.end())
|
||||
{
|
||||
approximated_forward_sharing.emplace(v, sharing_of_u_iterator->second);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sweep over search space, compute backward sharing
|
||||
for (const SearchSpaceEdge ¤t_edge : reverse_search_space)
|
||||
{
|
||||
const NodeID u = current_edge.first;
|
||||
const NodeID v = current_edge.second;
|
||||
if (nodes_in_path.find(v) != nodes_in_path.end())
|
||||
{
|
||||
// current_edge is on shortest path => sharing(u):=queue.GetKey(u);
|
||||
approximated_reverse_sharing.emplace(v, reverse_heap1.GetKey(v));
|
||||
}
|
||||
else
|
||||
{
|
||||
// current edge is not on shortest path. Check if we know a value for the other
|
||||
// endpoint
|
||||
const auto sharing_of_u_iterator = approximated_reverse_sharing.find(u);
|
||||
if (sharing_of_u_iterator != approximated_reverse_sharing.end())
|
||||
{
|
||||
approximated_reverse_sharing.emplace(v, sharing_of_u_iterator->second);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// util::Log(logDEBUG) << "fwd_search_space size: " <<
|
||||
// forward_search_space.size() << ", marked " << approximated_forward_sharing.size() << "
|
||||
// nodes";
|
||||
// util::Log(logDEBUG) << "rev_search_space size: " <<
|
||||
// reverse_search_space.size() << ", marked " << approximated_reverse_sharing.size() << "
|
||||
// nodes";
|
||||
|
||||
std::vector<NodeID> preselected_node_list;
|
||||
for (const NodeID node : via_node_candidate_list)
|
||||
{
|
||||
if (node == middle_node)
|
||||
continue;
|
||||
const auto fwd_iterator = approximated_forward_sharing.find(node);
|
||||
const int fwd_sharing =
|
||||
(fwd_iterator != approximated_forward_sharing.end()) ? fwd_iterator->second : 0;
|
||||
const auto rev_iterator = approximated_reverse_sharing.find(node);
|
||||
const int rev_sharing =
|
||||
(rev_iterator != approximated_reverse_sharing.end()) ? rev_iterator->second : 0;
|
||||
|
||||
const int approximated_sharing = fwd_sharing + rev_sharing;
|
||||
const int approximated_length = forward_heap1.GetKey(node) + reverse_heap1.GetKey(node);
|
||||
const bool length_passes =
|
||||
(approximated_length < upper_bound_to_shortest_path_weight * (1 + VIAPATH_EPSILON));
|
||||
const bool sharing_passes =
|
||||
(approximated_sharing <= upper_bound_to_shortest_path_weight * VIAPATH_GAMMA);
|
||||
const bool stretch_passes =
|
||||
(approximated_length - approximated_sharing) <
|
||||
((1. + VIAPATH_ALPHA) * (upper_bound_to_shortest_path_weight - approximated_sharing));
|
||||
|
||||
if (length_passes && sharing_passes && stretch_passes)
|
||||
{
|
||||
preselected_node_list.emplace_back(node);
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<NodeID> &packed_shortest_path = packed_forward_path;
|
||||
if (!path_is_a_loop)
|
||||
{
|
||||
std::reverse(packed_shortest_path.begin(), packed_shortest_path.end());
|
||||
packed_shortest_path.emplace_back(middle_node);
|
||||
packed_shortest_path.insert(
|
||||
packed_shortest_path.end(), packed_reverse_path.begin(), packed_reverse_path.end());
|
||||
}
|
||||
std::vector<RankedCandidateNode> ranked_candidates_list;
|
||||
|
||||
// prioritizing via nodes for deep inspection
|
||||
for (const NodeID node : preselected_node_list)
|
||||
{
|
||||
int length_of_via_path = 0, sharing_of_via_path = 0;
|
||||
ComputeLengthAndSharingOfViaPath(facade,
|
||||
node,
|
||||
&length_of_via_path,
|
||||
&sharing_of_via_path,
|
||||
packed_shortest_path,
|
||||
min_edge_offset);
|
||||
const int maximum_allowed_sharing =
|
||||
static_cast<int>(upper_bound_to_shortest_path_weight * VIAPATH_GAMMA);
|
||||
if (sharing_of_via_path <= maximum_allowed_sharing &&
|
||||
length_of_via_path <= upper_bound_to_shortest_path_weight * (1 + VIAPATH_EPSILON))
|
||||
{
|
||||
ranked_candidates_list.emplace_back(node, length_of_via_path, sharing_of_via_path);
|
||||
}
|
||||
}
|
||||
std::sort(ranked_candidates_list.begin(), ranked_candidates_list.end());
|
||||
|
||||
NodeID selected_via_node = SPECIAL_NODEID;
|
||||
int length_of_via_path = INVALID_EDGE_WEIGHT;
|
||||
NodeID s_v_middle = SPECIAL_NODEID, v_t_middle = SPECIAL_NODEID;
|
||||
for (const RankedCandidateNode &candidate : ranked_candidates_list)
|
||||
{
|
||||
if (ViaNodeCandidatePassesTTest(facade,
|
||||
forward_heap1,
|
||||
reverse_heap1,
|
||||
forward_heap2,
|
||||
reverse_heap2,
|
||||
candidate,
|
||||
upper_bound_to_shortest_path_weight,
|
||||
&length_of_via_path,
|
||||
&s_v_middle,
|
||||
&v_t_middle,
|
||||
min_edge_offset))
|
||||
{
|
||||
// select first admissable
|
||||
selected_via_node = candidate.node;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Unpack shortest path and alternative, if they exist
|
||||
if (INVALID_EDGE_WEIGHT != upper_bound_to_shortest_path_weight)
|
||||
{
|
||||
BOOST_ASSERT(!packed_shortest_path.empty());
|
||||
raw_route_data.unpacked_path_segments.resize(1);
|
||||
raw_route_data.source_traversed_in_reverse.push_back(
|
||||
(packed_shortest_path.front() !=
|
||||
phantom_node_pair.source_phantom.forward_segment_id.id));
|
||||
raw_route_data.target_traversed_in_reverse.push_back((
|
||||
packed_shortest_path.back() != phantom_node_pair.target_phantom.forward_segment_id.id));
|
||||
|
||||
super::UnpackPath(facade,
|
||||
// -- packed input
|
||||
packed_shortest_path.begin(),
|
||||
packed_shortest_path.end(),
|
||||
// -- start of route
|
||||
phantom_node_pair,
|
||||
// -- unpacked output
|
||||
raw_route_data.unpacked_path_segments.front());
|
||||
raw_route_data.shortest_path_length = upper_bound_to_shortest_path_weight;
|
||||
}
|
||||
|
||||
if (SPECIAL_NODEID != selected_via_node)
|
||||
{
|
||||
std::vector<NodeID> packed_alternate_path;
|
||||
// retrieve alternate path
|
||||
RetrievePackedAlternatePath(forward_heap1,
|
||||
reverse_heap1,
|
||||
forward_heap2,
|
||||
reverse_heap2,
|
||||
s_v_middle,
|
||||
v_t_middle,
|
||||
packed_alternate_path);
|
||||
|
||||
raw_route_data.alt_source_traversed_in_reverse.push_back(
|
||||
(packed_alternate_path.front() !=
|
||||
phantom_node_pair.source_phantom.forward_segment_id.id));
|
||||
raw_route_data.alt_target_traversed_in_reverse.push_back(
|
||||
(packed_alternate_path.back() !=
|
||||
phantom_node_pair.target_phantom.forward_segment_id.id));
|
||||
|
||||
// unpack the alternate path
|
||||
super::UnpackPath(facade,
|
||||
packed_alternate_path.begin(),
|
||||
packed_alternate_path.end(),
|
||||
phantom_node_pair,
|
||||
raw_route_data.unpacked_alternative);
|
||||
|
||||
raw_route_data.alternative_path_length = length_of_via_path;
|
||||
}
|
||||
else
|
||||
{
|
||||
BOOST_ASSERT(raw_route_data.alternative_path_length == INVALID_EDGE_WEIGHT);
|
||||
}
|
||||
}
|
||||
|
||||
void AlternativeRouting::RetrievePackedAlternatePath(const QueryHeap &forward_heap1,
|
||||
const QueryHeap &reverse_heap1,
|
||||
const QueryHeap &forward_heap2,
|
||||
const QueryHeap &reverse_heap2,
|
||||
const NodeID s_v_middle,
|
||||
const NodeID v_t_middle,
|
||||
std::vector<NodeID> &packed_path) const
|
||||
{
|
||||
// fetch packed path [s,v)
|
||||
std::vector<NodeID> packed_v_t_path;
|
||||
super::RetrievePackedPathFromHeap(forward_heap1, reverse_heap2, s_v_middle, packed_path);
|
||||
packed_path.pop_back(); // remove middle node. It's in both half-paths
|
||||
|
||||
// fetch patched path [v,t]
|
||||
super::RetrievePackedPathFromHeap(forward_heap2, reverse_heap1, v_t_middle, packed_v_t_path);
|
||||
|
||||
packed_path.insert(packed_path.end(), packed_v_t_path.begin(), packed_v_t_path.end());
|
||||
}
|
||||
|
||||
// TODO: reorder parameters
|
||||
// compute and unpack <s,..,v> and <v,..,t> by exploring search spaces
|
||||
// from v and intersecting against queues. only half-searches have to be
|
||||
// done at this stage
|
||||
void AlternativeRouting::ComputeLengthAndSharingOfViaPath(
    const std::shared_ptr<const datafacade::BaseDataFacade> facade,
    const NodeID via_node,
    int *real_length_of_via_path,
    int *sharing_of_via_path,
    const std::vector<NodeID> &packed_shortest_path,
    const EdgeWeight min_edge_offset)
{
    // Computes, for a candidate via node v:
    //  - *real_length_of_via_path: the exact weight of the via route <s,..,v,..,t>,
    //    obtained by finishing two half-searches rooted at v against the existing
    //    s-rooted forward and t-rooted reverse search spaces (heaps 1), and
    //  - *sharing_of_via_path: the weight of the portion the via route shares with
    //    packed_shortest_path, accumulated by walking both packed paths in lockstep
    //    and partially unpacking the first edge on which they deviate.
    // NOTE(review): *sharing_of_via_path is accumulated with += and is assumed to be
    // zero-initialized by the caller — confirm at call sites.
    engine_working_data.InitializeOrClearSecondThreadLocalStorage(facade->GetNumberOfNodes());

    // Heaps 1 still hold the completed searches from s (forward) and t (reverse);
    // heaps 2 are fresh and host the two half-searches rooted at the via node.
    QueryHeap &existing_forward_heap = *engine_working_data.forward_heap_1;
    QueryHeap &existing_reverse_heap = *engine_working_data.reverse_heap_1;
    QueryHeap &new_forward_heap = *engine_working_data.forward_heap_2;
    QueryHeap &new_reverse_heap = *engine_working_data.reverse_heap_2;

    std::vector<NodeID> packed_s_v_path;
    std::vector<NodeID> packed_v_t_path;

    // Filled only where the packed paths deviate; used to measure the shared
    // prefix/suffix at finer (partially unpacked) granularity.
    std::vector<NodeID> partially_unpacked_shortest_path;
    std::vector<NodeID> partially_unpacked_via_path;

    NodeID s_v_middle = SPECIAL_NODEID;
    int upper_bound_s_v_path_length = INVALID_EDGE_WEIGHT;
    new_reverse_heap.Insert(via_node, 0, via_node);
    // compute path <s,..,v> by reusing forward search from s
    const bool constexpr STALLING_ENABLED = true;
    const bool constexpr DO_NOT_FORCE_LOOPS = false;
    while (!new_reverse_heap.Empty())
    {
        // Run the v-rooted reverse half-search to exhaustion against the existing
        // forward search space; meeting point/weight land in s_v_middle / bound.
        super::RoutingStep(facade,
                           new_reverse_heap,
                           existing_forward_heap,
                           s_v_middle,
                           upper_bound_s_v_path_length,
                           min_edge_offset,
                           false,
                           STALLING_ENABLED,
                           DO_NOT_FORCE_LOOPS,
                           DO_NOT_FORCE_LOOPS);
    }
    // compute path <v,..,t> by reusing backward search from node t
    NodeID v_t_middle = SPECIAL_NODEID;
    int upper_bound_of_v_t_path_length = INVALID_EDGE_WEIGHT;
    new_forward_heap.Insert(via_node, 0, via_node);
    while (!new_forward_heap.Empty())
    {
        super::RoutingStep(facade,
                           new_forward_heap,
                           existing_reverse_heap,
                           v_t_middle,
                           upper_bound_of_v_t_path_length,
                           min_edge_offset,
                           true,
                           STALLING_ENABLED,
                           DO_NOT_FORCE_LOOPS,
                           DO_NOT_FORCE_LOOPS);
    }
    // Total via-path weight; may be garbage (sum involving INVALID_EDGE_WEIGHT)
    // if either half-search failed — the check below returns in that case.
    *real_length_of_via_path = upper_bound_s_v_path_length + upper_bound_of_v_t_path_length;

    if (SPECIAL_NODEID == s_v_middle || SPECIAL_NODEID == v_t_middle)
    {
        return;
    }

    // retrieve packed paths
    super::RetrievePackedPathFromHeap(
        existing_forward_heap, new_reverse_heap, s_v_middle, packed_s_v_path);
    super::RetrievePackedPathFromHeap(
        new_forward_heap, existing_reverse_heap, v_t_middle, packed_v_t_path);

    // partial unpacking, compute sharing
    // First partially unpack s-->v until paths deviate, note length of common path.
    const auto s_v_min_path_size =
        std::min(packed_s_v_path.size(), packed_shortest_path.size()) - 1;
    for (const auto current_node : util::irange<std::size_t>(0UL, s_v_min_path_size))
    {
        if (packed_s_v_path[current_node] == packed_shortest_path[current_node] &&
            packed_s_v_path[current_node + 1] == packed_shortest_path[current_node + 1])
        {
            // Edge is shared between both packed paths: count its full weight.
            EdgeID edgeID = facade->FindEdgeInEitherDirection(packed_s_v_path[current_node],
                                                              packed_s_v_path[current_node + 1]);
            *sharing_of_via_path += facade->GetEdgeData(edgeID).weight;
        }
        else
        {
            if (packed_s_v_path[current_node] == packed_shortest_path[current_node])
            {
                // Paths deviate at this edge: unpack both deviating (possibly
                // shortcut) edges one level so the shared prefix can be measured
                // below at finer granularity.
                super::UnpackEdge(facade,
                                  packed_s_v_path[current_node],
                                  packed_s_v_path[current_node + 1],
                                  partially_unpacked_via_path);
                super::UnpackEdge(facade,
                                  packed_shortest_path[current_node],
                                  packed_shortest_path[current_node + 1],
                                  partially_unpacked_shortest_path);
                break;
            }
        }
    }
    // traverse partially unpacked edge and note common prefix
    const int64_t packed_path_length =
        static_cast<int64_t>(
            std::min(partially_unpacked_via_path.size(), partially_unpacked_shortest_path.size())) -
        1;
    for (int64_t current_node = 0; (current_node < packed_path_length) &&
                                   (partially_unpacked_via_path[current_node] ==
                                        partially_unpacked_shortest_path[current_node] &&
                                    partially_unpacked_via_path[current_node + 1] ==
                                        partially_unpacked_shortest_path[current_node + 1]);
         ++current_node)
    {
        // Still on the common prefix inside the unpacked edge: keep adding weight.
        EdgeID selected_edge =
            facade->FindEdgeInEitherDirection(partially_unpacked_via_path[current_node],
                                              partially_unpacked_via_path[current_node + 1]);
        *sharing_of_via_path += facade->GetEdgeData(selected_edge).weight;
    }

    // Second, partially unpack v-->t in reverse order until paths deviate and note lengths
    int64_t via_path_index = static_cast<int64_t>(packed_v_t_path.size()) - 1;
    int64_t shortest_path_index = static_cast<int64_t>(packed_shortest_path.size()) - 1;
    for (; via_path_index > 0 && shortest_path_index > 0; --via_path_index, --shortest_path_index)
    {
        if (packed_v_t_path[via_path_index - 1] == packed_shortest_path[shortest_path_index - 1] &&
            packed_v_t_path[via_path_index] == packed_shortest_path[shortest_path_index])
        {
            // Shared suffix edge: count its full weight.
            EdgeID edgeID = facade->FindEdgeInEitherDirection(packed_v_t_path[via_path_index - 1],
                                                              packed_v_t_path[via_path_index]);
            *sharing_of_via_path += facade->GetEdgeData(edgeID).weight;
        }
        else
        {
            if (packed_v_t_path[via_path_index] == packed_shortest_path[shortest_path_index])
            {
                // Suffixes deviate at this edge: unpack both deviating edges one
                // level for the finer-grained suffix measurement below.
                // NOTE(review): the partially_unpacked_* vectors are reused from the
                // prefix pass without clearing; the suffix loop below walks them from
                // the back, so the appended suffix expansion is what gets compared.
                super::UnpackEdge(facade,
                                  packed_v_t_path[via_path_index - 1],
                                  packed_v_t_path[via_path_index],
                                  partially_unpacked_via_path);
                super::UnpackEdge(facade,
                                  packed_shortest_path[shortest_path_index - 1],
                                  packed_shortest_path[shortest_path_index],
                                  partially_unpacked_shortest_path);
                break;
            }
        }
    }

    // Walk the partially unpacked edges backwards and add the weight of the
    // common suffix, stopping at the first disagreement.
    via_path_index = static_cast<int64_t>(partially_unpacked_via_path.size()) - 1;
    shortest_path_index = static_cast<int64_t>(partially_unpacked_shortest_path.size()) - 1;
    for (; via_path_index > 0 && shortest_path_index > 0; --via_path_index, --shortest_path_index)
    {
        if (partially_unpacked_via_path[via_path_index - 1] ==
                partially_unpacked_shortest_path[shortest_path_index - 1] &&
            partially_unpacked_via_path[via_path_index] ==
                partially_unpacked_shortest_path[shortest_path_index])
        {
            EdgeID edgeID =
                facade->FindEdgeInEitherDirection(partially_unpacked_via_path[via_path_index - 1],
                                                  partially_unpacked_via_path[via_path_index]);
            *sharing_of_via_path += facade->GetEdgeData(edgeID).weight;
        }
        else
        {
            break;
        }
    }
    // finished partial unpacking spree! Amount of sharing is stored to appropriate pointer
    // variable
}
|
||||
|
||||
// conduct T-Test
|
||||
bool AlternativeRouting::ViaNodeCandidatePassesTTest(
    const std::shared_ptr<const datafacade::BaseDataFacade> facade,
    QueryHeap &existing_forward_heap,
    QueryHeap &existing_reverse_heap,
    QueryHeap &new_forward_heap,
    QueryHeap &new_reverse_heap,
    const RankedCandidateNode &candidate,
    const int length_of_shortest_path,
    int *length_of_via_path,
    NodeID *s_v_middle,
    NodeID *v_t_middle,
    const EdgeWeight min_edge_offset) const
{
    // T-Test for a via-node candidate: determines whether the via route
    // <s,..,v,..,t> is locally optimal around v. It unpacks the via route only
    // far enough to isolate the sub-path of weight ~2*T around the candidate
    // (T = VIAPATH_EPSILON * length_of_shortest_path, one T on each side),
    // then runs an ordinary bidirectional query between that sub-path's
    // endpoints (s_P, t_P) and checks the shortest distance is not smaller
    // than the sub-path's weight.
    // Returns true iff the candidate passes. On success also outputs the via
    // route's exact weight (*length_of_via_path) and the meeting nodes of the
    // two half-searches (*s_v_middle, *v_t_middle).
    new_forward_heap.Clear();
    new_reverse_heap.Clear();
    std::vector<NodeID> packed_s_v_path;
    std::vector<NodeID> packed_v_t_path;

    *s_v_middle = SPECIAL_NODEID;
    int upper_bound_s_v_path_length = INVALID_EDGE_WEIGHT;
    // compute path <s,..,v> by reusing forward search from s
    new_reverse_heap.Insert(candidate.node, 0, candidate.node);
    const bool constexpr STALLING_ENABLED = true;
    const bool constexpr DO_NOT_FORCE_LOOPS = false;
    while (new_reverse_heap.Size() > 0)
    {
        super::RoutingStep(facade,
                           new_reverse_heap,
                           existing_forward_heap,
                           *s_v_middle,
                           upper_bound_s_v_path_length,
                           min_edge_offset,
                           false,
                           STALLING_ENABLED,
                           DO_NOT_FORCE_LOOPS,
                           DO_NOT_FORCE_LOOPS);
    }

    if (INVALID_EDGE_WEIGHT == upper_bound_s_v_path_length)
    {
        // v is not reachable from s: candidate cannot form a via route.
        return false;
    }

    // compute path <v,..,t> by reusing backward search from t
    *v_t_middle = SPECIAL_NODEID;
    int upper_bound_of_v_t_path_length = INVALID_EDGE_WEIGHT;
    new_forward_heap.Insert(candidate.node, 0, candidate.node);
    while (new_forward_heap.Size() > 0)
    {
        super::RoutingStep(facade,
                           new_forward_heap,
                           existing_reverse_heap,
                           *v_t_middle,
                           upper_bound_of_v_t_path_length,
                           min_edge_offset,
                           true,
                           STALLING_ENABLED,
                           DO_NOT_FORCE_LOOPS,
                           DO_NOT_FORCE_LOOPS);
    }

    if (INVALID_EDGE_WEIGHT == upper_bound_of_v_t_path_length)
    {
        // t is not reachable from v.
        return false;
    }

    *length_of_via_path = upper_bound_s_v_path_length + upper_bound_of_v_t_path_length;

    // retrieve packed paths
    super::RetrievePackedPathFromHeap(
        existing_forward_heap, new_reverse_heap, *s_v_middle, packed_s_v_path);

    super::RetrievePackedPathFromHeap(
        new_forward_heap, existing_reverse_heap, *v_t_middle, packed_v_t_path);

    // s_P / t_P will become the endpoints of the T-Test sub-path; they start at
    // the meeting nodes and are pushed outwards while unwinding below.
    NodeID s_P = *s_v_middle, t_P = *v_t_middle;
    if (SPECIAL_NODEID == s_P)
    {
        return false;
    }

    if (SPECIAL_NODEID == t_P)
    {
        return false;
    }
    const int T_threshold = static_cast<int>(VIAPATH_EPSILON * length_of_shortest_path);
    int unpacked_until_weight = 0;

    std::stack<SearchSpaceEdge> unpack_stack;
    // Traverse path s-->v backwards (from v towards s), accumulating weight
    // until the next edge would cross T_threshold; that edge is pushed for
    // finer-grained unpacking.
    for (std::size_t i = packed_s_v_path.size() - 1; (i > 0) && unpack_stack.empty(); --i)
    {
        const EdgeID current_edge_id =
            facade->FindEdgeInEitherDirection(packed_s_v_path[i - 1], packed_s_v_path[i]);
        const int length_of_current_edge = facade->GetEdgeData(current_edge_id).weight;
        if ((length_of_current_edge + unpacked_until_weight) >= T_threshold)
        {
            unpack_stack.emplace(packed_s_v_path[i - 1], packed_s_v_path[i]);
        }
        else
        {
            unpacked_until_weight += length_of_current_edge;
            s_P = packed_s_v_path[i - 1];
        }
    }

    // Repeatedly split the straddling (shortcut) edge until the exact node at
    // which the accumulated weight reaches T_threshold is found; that node
    // becomes s_P.
    while (!unpack_stack.empty())
    {
        const SearchSpaceEdge via_path_edge = unpack_stack.top();
        unpack_stack.pop();
        EdgeID edge_in_via_path_id =
            facade->FindEdgeInEitherDirection(via_path_edge.first, via_path_edge.second);

        if (SPECIAL_EDGEID == edge_in_via_path_id)
        {
            return false;
        }

        const EdgeData &current_edge_data = facade->GetEdgeData(edge_in_via_path_id);
        const bool current_edge_is_shortcut = current_edge_data.shortcut;
        if (current_edge_is_shortcut)
        {
            const NodeID via_path_middle_node_id = current_edge_data.id;
            const EdgeID second_segment_edge_id =
                facade->FindEdgeInEitherDirection(via_path_middle_node_id, via_path_edge.second);
            const int second_segment_length = facade->GetEdgeData(second_segment_edge_id).weight;
            // attention: !unpacking in reverse!
            // Check if the second segment is the one to cross the threshold: if
            // yes recurse into the second segment, else recurse into the first
            // segment and account for the second one's weight.
            if (unpacked_until_weight + second_segment_length >= T_threshold)
            {
                unpack_stack.emplace(via_path_middle_node_id, via_path_edge.second);
            }
            else
            {
                unpacked_until_weight += second_segment_length;
                unpack_stack.emplace(via_path_edge.first, via_path_middle_node_id);
            }
        }
        else
        {
            // edge is not a shortcut, set the start node for T-Test to end of edge.
            unpacked_until_weight += current_edge_data.weight;
            s_P = via_path_edge.first;
        }
    }

    int t_test_path_length = unpacked_until_weight;
    unpacked_until_weight = 0;
    // Traverse path v-->t forwards, mirroring the s-->v pass above.
    // NOTE(review): the original comment said "s-->v" here, but this loop walks
    // packed_v_t_path.
    BOOST_ASSERT(!packed_v_t_path.empty());
    for (unsigned i = 0, packed_path_length = static_cast<unsigned>(packed_v_t_path.size() - 1);
         (i < packed_path_length) && unpack_stack.empty();
         ++i)
    {
        const EdgeID edgeID =
            facade->FindEdgeInEitherDirection(packed_v_t_path[i], packed_v_t_path[i + 1]);
        int length_of_current_edge = facade->GetEdgeData(edgeID).weight;
        if (length_of_current_edge + unpacked_until_weight >= T_threshold)
        {
            unpack_stack.emplace(packed_v_t_path[i], packed_v_t_path[i + 1]);
        }
        else
        {
            unpacked_until_weight += length_of_current_edge;
            t_P = packed_v_t_path[i + 1];
        }
    }

    // Split the straddling edge on the v-->t side until the exact end node t_P
    // of the T-Test sub-path is found (unpacking forwards this time).
    while (!unpack_stack.empty())
    {
        const SearchSpaceEdge via_path_edge = unpack_stack.top();
        unpack_stack.pop();
        EdgeID edge_in_via_path_id =
            facade->FindEdgeInEitherDirection(via_path_edge.first, via_path_edge.second);
        if (SPECIAL_EDGEID == edge_in_via_path_id)
        {
            return false;
        }

        const EdgeData &current_edge_data = facade->GetEdgeData(edge_in_via_path_id);
        const bool IsViaEdgeShortCut = current_edge_data.shortcut;
        if (IsViaEdgeShortCut)
        {
            const NodeID middleOfViaPath = current_edge_data.id;
            EdgeID edgeIDOfFirstSegment =
                facade->FindEdgeInEitherDirection(via_path_edge.first, middleOfViaPath);
            int lengthOfFirstSegment = facade->GetEdgeData(edgeIDOfFirstSegment).weight;
            // Check if the first segment is the one to cross the threshold: if
            // yes recurse into the first segment, else recurse into the second
            // segment and account for the first one's weight.
            if (unpacked_until_weight + lengthOfFirstSegment >= T_threshold)
            {
                unpack_stack.emplace(via_path_edge.first, middleOfViaPath);
            }
            else
            {
                unpacked_until_weight += lengthOfFirstSegment;
                unpack_stack.emplace(middleOfViaPath, via_path_edge.second);
            }
        }
        else
        {
            // edge is not a shortcut, set the end node for T-Test to end of edge.
            unpacked_until_weight += current_edge_data.weight;
            t_P = via_path_edge.second;
        }
    }

    t_test_path_length += unpacked_until_weight;
    // Run actual T-Test query and compare if weight equal.
    engine_working_data.InitializeOrClearThirdThreadLocalStorage(facade->GetNumberOfNodes());

    QueryHeap &forward_heap3 = *engine_working_data.forward_heap_3;
    QueryHeap &reverse_heap3 = *engine_working_data.reverse_heap_3;
    int upper_bound = INVALID_EDGE_WEIGHT;
    NodeID middle = SPECIAL_NODEID;

    forward_heap3.Insert(s_P, 0, s_P);
    reverse_heap3.Insert(t_P, 0, t_P);
    // Plain bidirectional search between the sub-path's endpoints s_P and t_P.
    while ((forward_heap3.Size() + reverse_heap3.Size()) > 0)
    {
        if (!forward_heap3.Empty())
        {
            super::RoutingStep(facade,
                               forward_heap3,
                               reverse_heap3,
                               middle,
                               upper_bound,
                               min_edge_offset,
                               true,
                               STALLING_ENABLED,
                               DO_NOT_FORCE_LOOPS,
                               DO_NOT_FORCE_LOOPS);
        }
        if (!reverse_heap3.Empty())
        {
            super::RoutingStep(facade,
                               reverse_heap3,
                               forward_heap3,
                               middle,
                               upper_bound,
                               min_edge_offset,
                               false,
                               STALLING_ENABLED,
                               DO_NOT_FORCE_LOOPS,
                               DO_NOT_FORCE_LOOPS);
        }
    }
    // Passes iff no strictly shorter connection than the via sub-path exists.
    return (upper_bound <= t_test_path_length);
}
|
||||
|
||||
} // namespace routing_algorithms
|
||||
} // namespace engine
|
||||
} // namespace osrm}
|
125
src/engine/routing_algorithms/direct_shortest_path.cpp
Normal file
125
src/engine/routing_algorithms/direct_shortest_path.cpp
Normal file
@ -0,0 +1,125 @@
|
||||
#include "engine/routing_algorithms/direct_shortest_path.hpp"
|
||||
|
||||
namespace osrm
|
||||
{
|
||||
namespace engine
|
||||
{
|
||||
namespace routing_algorithms
|
||||
{
|
||||
|
||||
/// This is a striped down version of the general shortest path algorithm.
|
||||
/// The general algorithm always computes two queries for each leg. This is only
|
||||
/// necessary in case of vias, where the directions of the start node is constrainted
|
||||
/// by the previous route.
|
||||
/// This variation is only an optimazation for graphs with slow queries, for example
|
||||
/// not fully contracted graphs.
|
||||
void DirectShortestPathRouting::
|
||||
operator()(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
const std::vector<PhantomNodes> &phantom_nodes_vector,
|
||||
InternalRouteResult &raw_route_data) const
|
||||
{
|
||||
// Get weight to next pair of target nodes.
|
||||
BOOST_ASSERT_MSG(1 == phantom_nodes_vector.size(),
|
||||
"Direct Shortest Path Query only accepts a single source and target pair. "
|
||||
"Multiple ones have been specified.");
|
||||
const auto &phantom_node_pair = phantom_nodes_vector.front();
|
||||
const auto &source_phantom = phantom_node_pair.source_phantom;
|
||||
const auto &target_phantom = phantom_node_pair.target_phantom;
|
||||
|
||||
engine_working_data.InitializeOrClearFirstThreadLocalStorage(facade->GetNumberOfNodes());
|
||||
QueryHeap &forward_heap = *(engine_working_data.forward_heap_1);
|
||||
QueryHeap &reverse_heap = *(engine_working_data.reverse_heap_1);
|
||||
forward_heap.Clear();
|
||||
reverse_heap.Clear();
|
||||
|
||||
BOOST_ASSERT(source_phantom.IsValid());
|
||||
BOOST_ASSERT(target_phantom.IsValid());
|
||||
|
||||
if (source_phantom.forward_segment_id.enabled)
|
||||
{
|
||||
forward_heap.Insert(source_phantom.forward_segment_id.id,
|
||||
-source_phantom.GetForwardWeightPlusOffset(),
|
||||
source_phantom.forward_segment_id.id);
|
||||
}
|
||||
if (source_phantom.reverse_segment_id.enabled)
|
||||
{
|
||||
forward_heap.Insert(source_phantom.reverse_segment_id.id,
|
||||
-source_phantom.GetReverseWeightPlusOffset(),
|
||||
source_phantom.reverse_segment_id.id);
|
||||
}
|
||||
|
||||
if (target_phantom.forward_segment_id.enabled)
|
||||
{
|
||||
reverse_heap.Insert(target_phantom.forward_segment_id.id,
|
||||
target_phantom.GetForwardWeightPlusOffset(),
|
||||
target_phantom.forward_segment_id.id);
|
||||
}
|
||||
|
||||
if (target_phantom.reverse_segment_id.enabled)
|
||||
{
|
||||
reverse_heap.Insert(target_phantom.reverse_segment_id.id,
|
||||
target_phantom.GetReverseWeightPlusOffset(),
|
||||
target_phantom.reverse_segment_id.id);
|
||||
}
|
||||
|
||||
int weight = INVALID_EDGE_WEIGHT;
|
||||
std::vector<NodeID> packed_leg;
|
||||
|
||||
const bool constexpr DO_NOT_FORCE_LOOPS =
|
||||
false; // prevents forcing of loops, since offsets are set correctly
|
||||
|
||||
if (facade->GetCoreSize() > 0)
|
||||
{
|
||||
engine_working_data.InitializeOrClearSecondThreadLocalStorage(facade->GetNumberOfNodes());
|
||||
QueryHeap &forward_core_heap = *(engine_working_data.forward_heap_2);
|
||||
QueryHeap &reverse_core_heap = *(engine_working_data.reverse_heap_2);
|
||||
forward_core_heap.Clear();
|
||||
reverse_core_heap.Clear();
|
||||
|
||||
super::SearchWithCore(facade,
|
||||
forward_heap,
|
||||
reverse_heap,
|
||||
forward_core_heap,
|
||||
reverse_core_heap,
|
||||
weight,
|
||||
packed_leg,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS);
|
||||
}
|
||||
else
|
||||
{
|
||||
super::Search(facade,
|
||||
forward_heap,
|
||||
reverse_heap,
|
||||
weight,
|
||||
packed_leg,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS);
|
||||
}
|
||||
|
||||
// No path found for both target nodes?
|
||||
if (INVALID_EDGE_WEIGHT == weight)
|
||||
{
|
||||
raw_route_data.shortest_path_length = INVALID_EDGE_WEIGHT;
|
||||
raw_route_data.alternative_path_length = INVALID_EDGE_WEIGHT;
|
||||
return;
|
||||
}
|
||||
|
||||
BOOST_ASSERT_MSG(!packed_leg.empty(), "packed path empty");
|
||||
|
||||
raw_route_data.shortest_path_length = weight;
|
||||
raw_route_data.unpacked_path_segments.resize(1);
|
||||
raw_route_data.source_traversed_in_reverse.push_back(
|
||||
(packed_leg.front() != phantom_node_pair.source_phantom.forward_segment_id.id));
|
||||
raw_route_data.target_traversed_in_reverse.push_back(
|
||||
(packed_leg.back() != phantom_node_pair.target_phantom.forward_segment_id.id));
|
||||
|
||||
super::UnpackPath(facade,
|
||||
packed_leg.begin(),
|
||||
packed_leg.end(),
|
||||
phantom_node_pair,
|
||||
raw_route_data.unpacked_path_segments.front());
|
||||
}
|
||||
} // namespace routing_algorithms
|
||||
} // namespace engine
|
||||
} // namespace osrm
|
191
src/engine/routing_algorithms/many_to_many.cpp
Normal file
191
src/engine/routing_algorithms/many_to_many.cpp
Normal file
@ -0,0 +1,191 @@
|
||||
#include "engine/routing_algorithms/many_to_many.hpp"
|
||||
|
||||
namespace osrm
|
||||
{
|
||||
namespace engine
|
||||
{
|
||||
namespace routing_algorithms
|
||||
{
|
||||
|
||||
// Computes a weight (duration) table between sources and targets using the
// bucket-based many-to-many CH algorithm: one backward search per target
// deposits (target-column, weight) buckets on every settled node, then one
// forward search per source harvests those buckets to fill its row.
// Empty source/target index vectors mean "use all phantom nodes".
// Entries remain std::numeric_limits<EdgeWeight>::max() when unreachable.
std::vector<EdgeWeight> ManyToManyRouting::
operator()(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
           const std::vector<PhantomNode> &phantom_nodes,
           const std::vector<std::size_t> &source_indices,
           const std::vector<std::size_t> &target_indices) const
{
    const auto number_of_sources =
        source_indices.empty() ? phantom_nodes.size() : source_indices.size();
    const auto number_of_targets =
        target_indices.empty() ? phantom_nodes.size() : target_indices.size();
    const auto number_of_entries = number_of_sources * number_of_targets;
    // row-major: result_table[row * number_of_targets + column]
    std::vector<EdgeWeight> result_table(number_of_entries, std::numeric_limits<EdgeWeight>::max());

    engine_working_data.InitializeOrClearFirstThreadLocalStorage(facade->GetNumberOfNodes());

    // the same heap is reused (cleared) for every individual search
    QueryHeap &query_heap = *(engine_working_data.forward_heap_1);

    SearchSpaceWithBuckets search_space_with_buckets;

    // backward searches: one per target, identified by its column index
    unsigned column_idx = 0;
    const auto search_target_phantom = [&](const PhantomNode &phantom) {
        query_heap.Clear();
        // insert target(s) at weight 0

        if (phantom.forward_segment_id.enabled)
        {
            query_heap.Insert(phantom.forward_segment_id.id,
                              phantom.GetForwardWeightPlusOffset(),
                              phantom.forward_segment_id.id);
        }
        if (phantom.reverse_segment_id.enabled)
        {
            query_heap.Insert(phantom.reverse_segment_id.id,
                              phantom.GetReverseWeightPlusOffset(),
                              phantom.reverse_segment_id.id);
        }

        // explore search space
        while (!query_heap.Empty())
        {
            BackwardRoutingStep(facade, column_idx, query_heap, search_space_with_buckets);
        }
        ++column_idx;
    };

    // for each source do forward search
    unsigned row_idx = 0;
    const auto search_source_phantom = [&](const PhantomNode &phantom) {
        query_heap.Clear();
        // insert target(s) at weight 0
        // (negated offsets so meeting weights add up correctly)

        if (phantom.forward_segment_id.enabled)
        {
            query_heap.Insert(phantom.forward_segment_id.id,
                              -phantom.GetForwardWeightPlusOffset(),
                              phantom.forward_segment_id.id);
        }
        if (phantom.reverse_segment_id.enabled)
        {
            query_heap.Insert(phantom.reverse_segment_id.id,
                              -phantom.GetReverseWeightPlusOffset(),
                              phantom.reverse_segment_id.id);
        }

        // explore search space
        while (!query_heap.Empty())
        {
            ForwardRoutingStep(facade,
                               row_idx,
                               number_of_targets,
                               query_heap,
                               search_space_with_buckets,
                               result_table);
        }
        ++row_idx;
    };

    // all backward searches must complete before any forward search starts,
    // since the forward steps read the buckets filled here
    if (target_indices.empty())
    {
        for (const auto &phantom : phantom_nodes)
        {
            search_target_phantom(phantom);
        }
    }
    else
    {
        for (const auto index : target_indices)
        {
            const auto &phantom = phantom_nodes[index];
            search_target_phantom(phantom);
        }
    }

    if (source_indices.empty())
    {
        for (const auto &phantom : phantom_nodes)
        {
            search_source_phantom(phantom);
        }
    }
    else
    {
        for (const auto index : source_indices)
        {
            const auto &phantom = phantom_nodes[index];
            search_source_phantom(phantom);
        }
    }

    return result_table;
}
|
||||
|
||||
void ManyToManyRouting::ForwardRoutingStep(
|
||||
const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
const unsigned row_idx,
|
||||
const unsigned number_of_targets,
|
||||
QueryHeap &query_heap,
|
||||
const SearchSpaceWithBuckets &search_space_with_buckets,
|
||||
std::vector<EdgeWeight> &result_table) const
|
||||
{
|
||||
const NodeID node = query_heap.DeleteMin();
|
||||
const int source_weight = query_heap.GetKey(node);
|
||||
|
||||
// check if each encountered node has an entry
|
||||
const auto bucket_iterator = search_space_with_buckets.find(node);
|
||||
// iterate bucket if there exists one
|
||||
if (bucket_iterator != search_space_with_buckets.end())
|
||||
{
|
||||
const std::vector<NodeBucket> &bucket_list = bucket_iterator->second;
|
||||
for (const NodeBucket ¤t_bucket : bucket_list)
|
||||
{
|
||||
// get target id from bucket entry
|
||||
const unsigned column_idx = current_bucket.target_id;
|
||||
const int target_weight = current_bucket.weight;
|
||||
auto ¤t_weight = result_table[row_idx * number_of_targets + column_idx];
|
||||
// check if new weight is better
|
||||
const EdgeWeight new_weight = source_weight + target_weight;
|
||||
if (new_weight < 0)
|
||||
{
|
||||
const EdgeWeight loop_weight = super::GetLoopWeight(facade, node);
|
||||
const int new_weight_with_loop = new_weight + loop_weight;
|
||||
if (loop_weight != INVALID_EDGE_WEIGHT && new_weight_with_loop >= 0)
|
||||
{
|
||||
current_weight = std::min(current_weight, new_weight_with_loop);
|
||||
}
|
||||
}
|
||||
else if (new_weight < current_weight)
|
||||
{
|
||||
result_table[row_idx * number_of_targets + column_idx] = new_weight;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (StallAtNode<true>(facade, node, source_weight, query_heap))
|
||||
{
|
||||
return;
|
||||
}
|
||||
RelaxOutgoingEdges<true>(facade, node, source_weight, query_heap);
|
||||
}
|
||||
|
||||
void ManyToManyRouting::BackwardRoutingStep(
|
||||
const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
const unsigned column_idx,
|
||||
QueryHeap &query_heap,
|
||||
SearchSpaceWithBuckets &search_space_with_buckets) const
|
||||
{
|
||||
const NodeID node = query_heap.DeleteMin();
|
||||
const int target_weight = query_heap.GetKey(node);
|
||||
|
||||
// store settled nodes in search space bucket
|
||||
search_space_with_buckets[node].emplace_back(column_idx, target_weight);
|
||||
|
||||
if (StallAtNode<false>(facade, node, target_weight, query_heap))
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
RelaxOutgoingEdges<false>(facade, node, target_weight, query_heap);
|
||||
}
|
||||
|
||||
} // namespace routing_algorithms
|
||||
} // namespace engine
|
||||
} // namespace osrm
|
379
src/engine/routing_algorithms/map_matching.cpp
Normal file
379
src/engine/routing_algorithms/map_matching.cpp
Normal file
@ -0,0 +1,379 @@
|
||||
#include "engine/routing_algorithms/map_matching.hpp"
|
||||
|
||||
namespace osrm
|
||||
{
|
||||
namespace engine
|
||||
{
|
||||
namespace routing_algorithms
|
||||
{
|
||||
|
||||
unsigned MapMatching::GetMedianSampleTime(const std::vector<unsigned> ×tamps) const
|
||||
{
|
||||
BOOST_ASSERT(timestamps.size() > 1);
|
||||
|
||||
std::vector<unsigned> sample_times(timestamps.size());
|
||||
|
||||
std::adjacent_difference(timestamps.begin(), timestamps.end(), sample_times.begin());
|
||||
|
||||
// don't use first element of sample_times -> will not be a difference.
|
||||
auto first_elem = std::next(sample_times.begin());
|
||||
auto median = first_elem + std::distance(first_elem, sample_times.end()) / 2;
|
||||
std::nth_element(first_elem, median, sample_times.end());
|
||||
return *median;
|
||||
}
|
||||
|
||||
// Matches a GPS trace onto the road network with an HMM/Viterbi approach
// (Newson & Krumm 2009): candidate road positions per sample are HMM states,
// emission probability comes from GPS distance, transition probability from
// the difference between network distance and great-circle distance.
// The trace is split into multiple SubMatchings wherever matching breaks
// down (gap in time, or no feasible transition).
SubMatchingList MapMatching::
operator()(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
           const CandidateLists &candidates_list,
           const std::vector<util::Coordinate> &trace_coordinates,
           const std::vector<unsigned> &trace_timestamps,
           const std::vector<boost::optional<double>> &trace_gps_precision) const
{
    SubMatchingList sub_matchings;

    BOOST_ASSERT(candidates_list.size() == trace_coordinates.size());
    BOOST_ASSERT(candidates_list.size() > 1);

    const bool use_timestamps = trace_timestamps.size() > 1;

    // typical time between samples; drives both the break threshold and the
    // plausible-distance threshold below
    const auto median_sample_time = [&] {
        if (use_timestamps)
        {
            return std::max(1u, GetMedianSampleTime(trace_timestamps));
        }
        else
        {
            return 1u;
        }
    }();
    const auto max_broken_time = median_sample_time * MAX_BROKEN_STATES;
    const auto max_distance_delta = [&] {
        if (use_timestamps)
        {
            return median_sample_time * facade->GetMapMatchingMaxSpeed();
        }
        else
        {
            return MAX_DISTANCE_DELTA;
        }
    }();

    // per-sample, per-candidate emission log-probabilities; either from the
    // default GPS precision or from the caller-supplied per-sample precision
    std::vector<std::vector<double>> emission_log_probabilities(trace_coordinates.size());
    if (trace_gps_precision.empty())
    {
        for (auto t = 0UL; t < candidates_list.size(); ++t)
        {
            emission_log_probabilities[t].resize(candidates_list[t].size());
            std::transform(candidates_list[t].begin(),
                           candidates_list[t].end(),
                           emission_log_probabilities[t].begin(),
                           [this](const PhantomNodeWithDistance &candidate) {
                               return default_emission_log_probability(candidate.distance);
                           });
        }
    }
    else
    {
        for (auto t = 0UL; t < candidates_list.size(); ++t)
        {
            emission_log_probabilities[t].resize(candidates_list[t].size());
            if (trace_gps_precision[t])
            {
                map_matching::EmissionLogProbability emission_log_probability(
                    *trace_gps_precision[t]);
                std::transform(
                    candidates_list[t].begin(),
                    candidates_list[t].end(),
                    emission_log_probabilities[t].begin(),
                    [&emission_log_probability](const PhantomNodeWithDistance &candidate) {
                        return emission_log_probability(candidate.distance);
                    });
            }
            else
            {
                std::transform(candidates_list[t].begin(),
                               candidates_list[t].end(),
                               emission_log_probabilities[t].begin(),
                               [this](const PhantomNodeWithDistance &candidate) {
                                   return default_emission_log_probability(candidate.distance);
                               });
            }
        }
    }

    HMM model(candidates_list, emission_log_probabilities);

    // first sample that has at least one usable candidate
    std::size_t initial_timestamp = model.initialize(0);
    if (initial_timestamp == map_matching::INVALID_STATE)
    {
        return sub_matchings;
    }

    engine_working_data.InitializeOrClearFirstThreadLocalStorage(facade->GetNumberOfNodes());
    engine_working_data.InitializeOrClearSecondThreadLocalStorage(facade->GetNumberOfNodes());

    QueryHeap &forward_heap = *(engine_working_data.forward_heap_1);
    QueryHeap &reverse_heap = *(engine_working_data.reverse_heap_1);
    QueryHeap &forward_core_heap = *(engine_working_data.forward_heap_2);
    QueryHeap &reverse_core_heap = *(engine_working_data.reverse_heap_2);

    std::size_t breakage_begin = map_matching::INVALID_STATE;
    std::vector<std::size_t> split_points;
    std::vector<std::size_t> prev_unbroken_timestamps;
    prev_unbroken_timestamps.reserve(candidates_list.size());
    prev_unbroken_timestamps.push_back(initial_timestamp);
    // main Viterbi forward pass; t may be rewound on a split (see below)
    for (auto t = initial_timestamp + 1; t < candidates_list.size(); ++t)
    {

        const bool gap_in_trace = [&, use_timestamps]() {
            // use temporal information if available to determine a split
            if (use_timestamps)
            {
                return trace_timestamps[t] - trace_timestamps[prev_unbroken_timestamps.back()] >
                       max_broken_time;
            }
            else
            {
                return t - prev_unbroken_timestamps.back() > MAX_BROKEN_STATES;
            }
        }();

        if (!gap_in_trace)
        {
            BOOST_ASSERT(!prev_unbroken_timestamps.empty());
            const std::size_t prev_unbroken_timestamp = prev_unbroken_timestamps.back();

            const auto &prev_viterbi = model.viterbi[prev_unbroken_timestamp];
            const auto &prev_pruned = model.pruned[prev_unbroken_timestamp];
            const auto &prev_unbroken_timestamps_list = candidates_list[prev_unbroken_timestamp];
            const auto &prev_coordinate = trace_coordinates[prev_unbroken_timestamp];

            auto &current_viterbi = model.viterbi[t];
            auto &current_pruned = model.pruned[t];
            auto &current_parents = model.parents[t];
            auto &current_lengths = model.path_distances[t];
            const auto &current_timestamps_list = candidates_list[t];
            const auto &current_coordinate = trace_coordinates[t];

            const auto haversine_distance = util::coordinate_calculation::haversineDistance(
                prev_coordinate, current_coordinate);
            // assumes minumum of 0.1 m/s
            // NOTE(review): the *0.25*10 scaling suggests a speed-to-duration
            // conversion, but the exact units are not evident here — verify
            const int duration_upper_bound =
                ((haversine_distance + max_distance_delta) * 0.25) * 10;

            // compute d_t for this timestamp and the next one
            for (const auto s : util::irange<std::size_t>(0UL, prev_viterbi.size()))
            {
                if (prev_pruned[s])
                {
                    continue;
                }

                for (const auto s_prime : util::irange<std::size_t>(0UL, current_viterbi.size()))
                {
                    const double emission_pr = emission_log_probabilities[t][s_prime];
                    double new_value = prev_viterbi[s] + emission_pr;
                    // early pruning: even before paying the (non-positive)
                    // transition log-probability, this path cannot win
                    if (current_viterbi[s_prime] > new_value)
                    {
                        continue;
                    }

                    forward_heap.Clear();
                    reverse_heap.Clear();

                    // network distance between the two candidate positions
                    double network_distance;
                    if (facade->GetCoreSize() > 0)
                    {
                        forward_core_heap.Clear();
                        reverse_core_heap.Clear();
                        network_distance = super::GetNetworkDistanceWithCore(
                            facade,
                            forward_heap,
                            reverse_heap,
                            forward_core_heap,
                            reverse_core_heap,
                            prev_unbroken_timestamps_list[s].phantom_node,
                            current_timestamps_list[s_prime].phantom_node,
                            duration_upper_bound);
                    }
                    else
                    {
                        network_distance = super::GetNetworkDistance(
                            facade,
                            forward_heap,
                            reverse_heap,
                            prev_unbroken_timestamps_list[s].phantom_node,
                            current_timestamps_list[s_prime].phantom_node);
                    }

                    // get distance diff between loc1/2 and locs/s_prime
                    const auto d_t = std::abs(network_distance - haversine_distance);

                    // very low probability transition -> prune
                    if (d_t >= max_distance_delta)
                    {
                        continue;
                    }

                    const double transition_pr = transition_log_probability(d_t);
                    new_value += transition_pr;

                    if (new_value > current_viterbi[s_prime])
                    {
                        current_viterbi[s_prime] = new_value;
                        current_parents[s_prime] = std::make_pair(prev_unbroken_timestamp, s);
                        current_lengths[s_prime] = network_distance;
                        current_pruned[s_prime] = false;
                        // at least one transition reached sample t
                        model.breakage[t] = false;
                    }
                }
            }

            if (model.breakage[t])
            {
                // save start of breakage -> we need this as split point
                if (t < breakage_begin)
                {
                    breakage_begin = t;
                }

                BOOST_ASSERT(prev_unbroken_timestamps.size() > 0);
                // remove both ends of the breakage
                prev_unbroken_timestamps.pop_back();
            }
            else
            {
                prev_unbroken_timestamps.push_back(t);
            }
        }

        // breakage recover has removed all previous good points
        const bool trace_split = prev_unbroken_timestamps.empty();

        if (trace_split || gap_in_trace)
        {
            std::size_t split_index = t;
            if (breakage_begin != map_matching::INVALID_STATE)
            {
                split_index = breakage_begin;
                breakage_begin = map_matching::INVALID_STATE;
            }
            split_points.push_back(split_index);

            // note: this preserves everything before split_index
            model.Clear(split_index);
            std::size_t new_start = model.initialize(split_index);
            // no new start was found -> stop viterbi calculation
            if (new_start == map_matching::INVALID_STATE)
            {
                break;
            }

            prev_unbroken_timestamps.clear();
            prev_unbroken_timestamps.push_back(new_start);
            // Important: We potentially go back here!
            // However since t > new_start >= breakge_begin
            // we can only reset trace_coordindates.size() times.
            t = new_start;
            // note: the head of the loop will call ++t, hence the next
            // iteration will actually be on new_start+1
        }
    }

    if (!prev_unbroken_timestamps.empty())
    {
        split_points.push_back(prev_unbroken_timestamps.back() + 1);
    }

    // backtracking: turn each [sub_matching_begin, sub_matching_end) segment
    // into a SubMatching by walking the Viterbi parent pointers backwards
    std::size_t sub_matching_begin = initial_timestamp;
    for (const auto sub_matching_end : split_points)
    {
        map_matching::SubMatching matching;

        // skip broken states at both ends of the segment
        std::size_t parent_timestamp_index = sub_matching_end - 1;
        while (parent_timestamp_index >= sub_matching_begin &&
               model.breakage[parent_timestamp_index])
        {
            --parent_timestamp_index;
        }
        while (sub_matching_begin < sub_matching_end && model.breakage[sub_matching_begin])
        {
            ++sub_matching_begin;
        }

        // matchings that only consist of one candidate are invalid
        if (parent_timestamp_index - sub_matching_begin + 1 < 2)
        {
            sub_matching_begin = sub_matching_end;
            continue;
        }

        // loop through the columns, and only compare the last entry
        const auto max_element_iter =
            std::max_element(model.viterbi[parent_timestamp_index].begin(),
                             model.viterbi[parent_timestamp_index].end());

        std::size_t parent_candidate_index =
            std::distance(model.viterbi[parent_timestamp_index].begin(), max_element_iter);

        std::deque<std::pair<std::size_t, std::size_t>> reconstructed_indices;
        while (parent_timestamp_index > sub_matching_begin)
        {
            // NOTE(review): this `continue` does not change
            // parent_timestamp_index, so the loop relies on never visiting a
            // broken state here (parents should only point to unbroken
            // states) — otherwise it would spin forever; verify upstream
            if (model.breakage[parent_timestamp_index])
            {
                continue;
            }

            reconstructed_indices.emplace_front(parent_timestamp_index, parent_candidate_index);
            const auto &next = model.parents[parent_timestamp_index][parent_candidate_index];
            // make sure we can never get stuck in this loop
            if (parent_timestamp_index == next.first)
            {
                break;
            }
            parent_timestamp_index = next.first;
            parent_candidate_index = next.second;
        }
        reconstructed_indices.emplace_front(parent_timestamp_index, parent_candidate_index);
        if (reconstructed_indices.size() < 2)
        {
            sub_matching_begin = sub_matching_end;
            continue;
        }

        auto matching_distance = 0.0;
        auto trace_distance = 0.0;
        matching.nodes.reserve(reconstructed_indices.size());
        matching.indices.reserve(reconstructed_indices.size());
        for (const auto &idx : reconstructed_indices)
        {
            const auto timestamp_index = idx.first;
            const auto location_index = idx.second;

            matching.indices.push_back(timestamp_index);
            matching.nodes.push_back(candidates_list[timestamp_index][location_index].phantom_node);
            matching_distance += model.path_distances[timestamp_index][location_index];
        }
        // total great-circle length of the raw trace over the matched range,
        // used to rate how plausible the matched route is
        util::for_each_pair(
            reconstructed_indices,
            [&trace_distance, &trace_coordinates](const std::pair<std::size_t, std::size_t> &prev,
                                                  const std::pair<std::size_t, std::size_t> &curr) {
                trace_distance += util::coordinate_calculation::haversineDistance(
                    trace_coordinates[prev.first], trace_coordinates[curr.first]);
            });

        matching.confidence = confidence(trace_distance, matching_distance);

        sub_matchings.push_back(matching);
        sub_matching_begin = sub_matching_end;
    }

    return sub_matchings;
}
|
||||
|
||||
} // namespace routing_algorithms
|
||||
} // namespace engine
|
||||
} // namespace osrm
|
||||
|
||||
//[1] "Hidden Markov Map Matching Through Noise and Sparseness"; P. Newson and J. Krumm; 2009; ACM
|
||||
// GIS
|
689
src/engine/routing_algorithms/routing_base.cpp
Normal file
689
src/engine/routing_algorithms/routing_base.cpp
Normal file
@ -0,0 +1,689 @@
|
||||
#include "engine/routing_algorithms/routing_base.hpp"
|
||||
|
||||
namespace osrm
|
||||
{
|
||||
namespace engine
|
||||
{
|
||||
namespace routing_algorithms
|
||||
{
|
||||
|
||||
// Performs one settle-and-relax step of a bidirectional CH Dijkstra search,
// operating on `forward_heap` in the direction given by `forward_direction`
// (the "reverse" heap is only consulted for meeting detection).
// Updates `middle_node_id`/`upper_bound` when the searches meet with an
// improved weight. `force_loop_*` handle the case where source and target
// sit on the same edge-based node and a self-loop must be taken.
// `stalling` enables the stall-on-demand optimization.
void BasicRoutingInterface::RoutingStep(
    const std::shared_ptr<const datafacade::BaseDataFacade> facade,
    SearchEngineData::QueryHeap &forward_heap,
    SearchEngineData::QueryHeap &reverse_heap,
    NodeID &middle_node_id,
    std::int32_t &upper_bound,
    std::int32_t min_edge_offset,
    const bool forward_direction,
    const bool stalling,
    const bool force_loop_forward,
    const bool force_loop_reverse) const
{
    const NodeID node = forward_heap.DeleteMin();
    const std::int32_t weight = forward_heap.GetKey(node);

    // meeting point: the other search has already reached this node
    if (reverse_heap.WasInserted(node))
    {
        const std::int32_t new_weight = reverse_heap.GetKey(node) + weight;
        if (new_weight < upper_bound)
        {
            // if loops are forced, they are so at the source
            if ((force_loop_forward && forward_heap.GetData(node).parent == node) ||
                (force_loop_reverse && reverse_heap.GetData(node).parent == node) ||
                // in this case we are looking at a bi-directional way where the source
                // and target phantom are on the same edge based node
                new_weight < 0)
            {
                // check whether there is a loop present at the node
                for (const auto edge : facade->GetAdjacentEdgeRange(node))
                {
                    const EdgeData &data = facade->GetEdgeData(edge);
                    bool forward_directionFlag = (forward_direction ? data.forward : data.backward);
                    if (forward_directionFlag)
                    {
                        const NodeID to = facade->GetTarget(edge);
                        if (to == node)
                        {
                            const EdgeWeight edge_weight = data.weight;
                            const std::int32_t loop_weight = new_weight + edge_weight;
                            if (loop_weight >= 0 && loop_weight < upper_bound)
                            {
                                middle_node_id = node;
                                upper_bound = loop_weight;
                            }
                        }
                    }
                }
            }
            else
            {
                BOOST_ASSERT(new_weight >= 0);

                middle_node_id = node;
                upper_bound = new_weight;
            }
        }
    }

    // make sure we don't terminate too early if we initialize the weight
    // for the nodes in the forward heap with the forward/reverse offset
    BOOST_ASSERT(min_edge_offset <= 0);
    if (weight + min_edge_offset > upper_bound)
    {
        // no remaining node can improve the bound -> drain this heap
        forward_heap.DeleteAll();
        return;
    }

    // Stalling
    if (stalling)
    {
        // if a neighbor reaches `node` cheaper via a reverse-direction edge,
        // `node` cannot lie on a shortest path -> skip relaxing it
        for (const auto edge : facade->GetAdjacentEdgeRange(node))
        {
            const EdgeData &data = facade->GetEdgeData(edge);
            const bool reverse_flag = ((!forward_direction) ? data.forward : data.backward);
            if (reverse_flag)
            {
                const NodeID to = facade->GetTarget(edge);
                const EdgeWeight edge_weight = data.weight;

                BOOST_ASSERT_MSG(edge_weight > 0, "edge_weight invalid");

                if (forward_heap.WasInserted(to))
                {
                    if (forward_heap.GetKey(to) + edge_weight < weight)
                    {
                        return;
                    }
                }
            }
        }
    }

    // relax all outgoing edges in the search direction
    for (const auto edge : facade->GetAdjacentEdgeRange(node))
    {
        const EdgeData &data = facade->GetEdgeData(edge);
        bool forward_directionFlag = (forward_direction ? data.forward : data.backward);
        if (forward_directionFlag)
        {
            const NodeID to = facade->GetTarget(edge);
            const EdgeWeight edge_weight = data.weight;

            BOOST_ASSERT_MSG(edge_weight > 0, "edge_weight invalid");
            const int to_weight = weight + edge_weight;

            // New Node discovered -> Add to Heap + Node Info Storage
            if (!forward_heap.WasInserted(to))
            {
                forward_heap.Insert(to, to_weight, node);
            }
            // Found a shorter Path -> Update weight
            else if (to_weight < forward_heap.GetKey(to))
            {
                // new parent
                forward_heap.GetData(to).parent = node;
                forward_heap.DecreaseKey(to, to_weight);
            }
        }
    }
}
|
||||
|
||||
// Returns the minimum weight of a forward self-loop edge at `node`, or
// INVALID_EDGE_WEIGHT when the node has no such loop.
EdgeWeight
BasicRoutingInterface::GetLoopWeight(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
                                     NodeID node) const
{
    EdgeWeight best_loop = INVALID_EDGE_WEIGHT;
    for (const auto edge : facade->GetAdjacentEdgeRange(node))
    {
        const auto &data = facade->GetEdgeData(edge);
        // a loop is a forward edge whose target is the node itself
        if (data.forward && facade->GetTarget(edge) == node)
        {
            best_loop = std::min(best_loop, data.weight);
        }
    }
    return best_loop;
}
|
||||
|
||||
/**
|
||||
* Unpacks a single edge (NodeID->NodeID) from the CH graph down to it's original non-shortcut
|
||||
* route.
|
||||
* @param from the node the CH edge starts at
|
||||
* @param to the node the CH edge finishes at
|
||||
* @param unpacked_path the sequence of original NodeIDs that make up the expanded CH edge
|
||||
*/
|
||||
void BasicRoutingInterface::UnpackEdge(
|
||||
const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
const NodeID from,
|
||||
const NodeID to,
|
||||
std::vector<NodeID> &unpacked_path) const
|
||||
{
|
||||
std::array<NodeID, 2> path{{from, to}};
|
||||
UnpackCHPath(
|
||||
*facade,
|
||||
path.begin(),
|
||||
path.end(),
|
||||
[&unpacked_path](const std::pair<NodeID, NodeID> &edge, const EdgeData & /* data */) {
|
||||
unpacked_path.emplace_back(edge.first);
|
||||
});
|
||||
unpacked_path.emplace_back(to);
|
||||
}
|
||||
|
||||
// Assembles the full packed path of a bidirectional search from its two
// heaps: the forward half (walked root-ward, so it must be reversed), the
// meeting node itself, then the reverse half (already in path order).
void BasicRoutingInterface::RetrievePackedPathFromHeap(
    const SearchEngineData::QueryHeap &forward_heap,
    const SearchEngineData::QueryHeap &reverse_heap,
    const NodeID middle_node_id,
    std::vector<NodeID> &packed_path) const
{
    RetrievePackedPathFromSingleHeap(forward_heap, middle_node_id, packed_path);
    std::reverse(packed_path.begin(), packed_path.end());
    packed_path.emplace_back(middle_node_id);
    RetrievePackedPathFromSingleHeap(reverse_heap, middle_node_id, packed_path);
}
|
||||
|
||||
void BasicRoutingInterface::RetrievePackedPathFromSingleHeap(
|
||||
const SearchEngineData::QueryHeap &search_heap,
|
||||
const NodeID middle_node_id,
|
||||
std::vector<NodeID> &packed_path) const
|
||||
{
|
||||
NodeID current_node_id = middle_node_id;
|
||||
// all initial nodes will have itself as parent, or a node not in the heap
|
||||
// in case of a core search heap. We need a distinction between core entry nodes
|
||||
// and start nodes since otherwise start node specific code that assumes
|
||||
// node == node.parent (e.g. the loop code) might get actived.
|
||||
while (current_node_id != search_heap.GetData(current_node_id).parent &&
|
||||
search_heap.WasInserted(search_heap.GetData(current_node_id).parent))
|
||||
{
|
||||
current_node_id = search_heap.GetData(current_node_id).parent;
|
||||
packed_path.emplace_back(current_node_id);
|
||||
}
|
||||
}
|
||||
|
||||
// assumes that heaps are already setup correctly.
|
||||
// ATTENTION: This only works if no additional offset is supplied next to the Phantom Node
|
||||
// Offsets.
|
||||
// In case additional offsets are supplied, you might have to force a loop first.
|
||||
// A forced loop might be necessary, if source and target are on the same segment.
|
||||
// If this is the case and the offsets of the respective direction are larger for the source
|
||||
// than the target
|
||||
// then a force loop is required (e.g. source_phantom.forward_segment_id ==
|
||||
// target_phantom.forward_segment_id
|
||||
// && source_phantom.GetForwardWeightPlusOffset() > target_phantom.GetForwardWeightPlusOffset())
|
||||
// requires
|
||||
// a force loop, if the heaps have been initialized with positive offsets.
|
||||
// Runs a full bidirectional CH Dijkstra search over pre-initialized heaps.
// On success, `weight` holds the shortest-path weight and `packed_leg` the
// packed (still-contracted) node sequence; on failure `weight` is set to
// INVALID_EDGE_WEIGHT. `duration_upper_bound` prunes paths at or above that
// weight. See the comment block above for the forced-loop preconditions.
void BasicRoutingInterface::Search(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
                                   SearchEngineData::QueryHeap &forward_heap,
                                   SearchEngineData::QueryHeap &reverse_heap,
                                   std::int32_t &weight,
                                   std::vector<NodeID> &packed_leg,
                                   const bool force_loop_forward,
                                   const bool force_loop_reverse,
                                   const int duration_upper_bound) const
{
    NodeID middle = SPECIAL_NODEID;
    weight = duration_upper_bound;

    // get offset to account for offsets on phantom nodes on compressed edges
    const auto min_edge_offset = std::min(0, forward_heap.MinKey());
    BOOST_ASSERT(min_edge_offset <= 0);
    // we only every insert negative offsets for nodes in the forward heap
    BOOST_ASSERT(reverse_heap.MinKey() >= 0);

    // run two-Target Dijkstra routing step.
    const constexpr bool STALLING_ENABLED = true;
    // alternate one step per direction until both heaps are exhausted
    while (0 < (forward_heap.Size() + reverse_heap.Size()))
    {
        if (!forward_heap.Empty())
        {
            RoutingStep(facade,
                        forward_heap,
                        reverse_heap,
                        middle,
                        weight,
                        min_edge_offset,
                        true,
                        STALLING_ENABLED,
                        force_loop_forward,
                        force_loop_reverse);
        }
        if (!reverse_heap.Empty())
        {
            RoutingStep(facade,
                        reverse_heap,
                        forward_heap,
                        middle,
                        weight,
                        min_edge_offset,
                        false,
                        STALLING_ENABLED,
                        force_loop_reverse,
                        force_loop_forward);
        }
    }

    // No path found for both target nodes?
    if (duration_upper_bound <= weight || SPECIAL_NODEID == middle)
    {
        weight = INVALID_EDGE_WEIGHT;
        return;
    }

    // Was a paths over one of the forward/reverse nodes not found?
    BOOST_ASSERT_MSG((SPECIAL_NODEID != middle && INVALID_EDGE_WEIGHT != weight), "no path found");

    // make sure to correctly unpack loops
    if (weight != forward_heap.GetKey(middle) + reverse_heap.GetKey(middle))
    {
        // self loop makes up the full path
        packed_leg.push_back(middle);
        packed_leg.push_back(middle);
    }
    else
    {
        RetrievePackedPathFromHeap(forward_heap, reverse_heap, middle, packed_leg);
    }
}
|
||||
|
||||
// assumes that heaps are already setup correctly.
|
||||
// A forced loop might be necessary, if source and target are on the same segment.
|
||||
// If this is the case and the offsets of the respective direction are larger for the source
|
||||
// than the target
|
||||
// then a force loop is required (e.g. source_phantom.forward_segment_id ==
|
||||
// target_phantom.forward_segment_id
|
||||
// && source_phantom.GetForwardWeightPlusOffset() > target_phantom.GetForwardWeightPlusOffset())
|
||||
// requires
|
||||
// a force loop, if the heaps have been initialized with positive offsets.
|
||||
void BasicRoutingInterface::SearchWithCore(
|
||||
const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
SearchEngineData::QueryHeap &forward_heap,
|
||||
SearchEngineData::QueryHeap &reverse_heap,
|
||||
SearchEngineData::QueryHeap &forward_core_heap,
|
||||
SearchEngineData::QueryHeap &reverse_core_heap,
|
||||
int &weight,
|
||||
std::vector<NodeID> &packed_leg,
|
||||
const bool force_loop_forward,
|
||||
const bool force_loop_reverse,
|
||||
int duration_upper_bound) const
|
||||
{
|
||||
NodeID middle = SPECIAL_NODEID;
|
||||
weight = duration_upper_bound;
|
||||
|
||||
using CoreEntryPoint = std::tuple<NodeID, EdgeWeight, NodeID>;
|
||||
std::vector<CoreEntryPoint> forward_entry_points;
|
||||
std::vector<CoreEntryPoint> reverse_entry_points;
|
||||
|
||||
// get offset to account for offsets on phantom nodes on compressed edges
|
||||
const auto min_edge_offset = std::min(0, forward_heap.MinKey());
|
||||
// we only every insert negative offsets for nodes in the forward heap
|
||||
BOOST_ASSERT(reverse_heap.MinKey() >= 0);
|
||||
|
||||
const constexpr bool STALLING_ENABLED = true;
|
||||
// run two-Target Dijkstra routing step.
|
||||
while (0 < (forward_heap.Size() + reverse_heap.Size()))
|
||||
{
|
||||
if (!forward_heap.Empty())
|
||||
{
|
||||
if (facade->IsCoreNode(forward_heap.Min()))
|
||||
{
|
||||
const NodeID node = forward_heap.DeleteMin();
|
||||
const int key = forward_heap.GetKey(node);
|
||||
forward_entry_points.emplace_back(node, key, forward_heap.GetData(node).parent);
|
||||
}
|
||||
else
|
||||
{
|
||||
RoutingStep(facade,
|
||||
forward_heap,
|
||||
reverse_heap,
|
||||
middle,
|
||||
weight,
|
||||
min_edge_offset,
|
||||
true,
|
||||
STALLING_ENABLED,
|
||||
force_loop_forward,
|
||||
force_loop_reverse);
|
||||
}
|
||||
}
|
||||
if (!reverse_heap.Empty())
|
||||
{
|
||||
if (facade->IsCoreNode(reverse_heap.Min()))
|
||||
{
|
||||
const NodeID node = reverse_heap.DeleteMin();
|
||||
const int key = reverse_heap.GetKey(node);
|
||||
reverse_entry_points.emplace_back(node, key, reverse_heap.GetData(node).parent);
|
||||
}
|
||||
else
|
||||
{
|
||||
RoutingStep(facade,
|
||||
reverse_heap,
|
||||
forward_heap,
|
||||
middle,
|
||||
weight,
|
||||
min_edge_offset,
|
||||
false,
|
||||
STALLING_ENABLED,
|
||||
force_loop_reverse,
|
||||
force_loop_forward);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const auto insertInCoreHeap = [](const CoreEntryPoint &p,
|
||||
SearchEngineData::QueryHeap &core_heap) {
|
||||
NodeID id;
|
||||
EdgeWeight weight;
|
||||
NodeID parent;
|
||||
// TODO this should use std::apply when we get c++17 support
|
||||
std::tie(id, weight, parent) = p;
|
||||
core_heap.Insert(id, weight, parent);
|
||||
};
|
||||
|
||||
forward_core_heap.Clear();
|
||||
for (const auto &p : forward_entry_points)
|
||||
{
|
||||
insertInCoreHeap(p, forward_core_heap);
|
||||
}
|
||||
|
||||
reverse_core_heap.Clear();
|
||||
for (const auto &p : reverse_entry_points)
|
||||
{
|
||||
insertInCoreHeap(p, reverse_core_heap);
|
||||
}
|
||||
|
||||
// get offset to account for offsets on phantom nodes on compressed edges
|
||||
int min_core_edge_offset = 0;
|
||||
if (forward_core_heap.Size() > 0)
|
||||
{
|
||||
min_core_edge_offset = std::min(min_core_edge_offset, forward_core_heap.MinKey());
|
||||
}
|
||||
if (reverse_core_heap.Size() > 0 && reverse_core_heap.MinKey() < 0)
|
||||
{
|
||||
min_core_edge_offset = std::min(min_core_edge_offset, reverse_core_heap.MinKey());
|
||||
}
|
||||
BOOST_ASSERT(min_core_edge_offset <= 0);
|
||||
|
||||
// run two-target Dijkstra routing step on core with termination criterion
|
||||
const constexpr bool STALLING_DISABLED = false;
|
||||
while (0 < forward_core_heap.Size() && 0 < reverse_core_heap.Size() &&
|
||||
weight > (forward_core_heap.MinKey() + reverse_core_heap.MinKey()))
|
||||
{
|
||||
RoutingStep(facade,
|
||||
forward_core_heap,
|
||||
reverse_core_heap,
|
||||
middle,
|
||||
weight,
|
||||
min_core_edge_offset,
|
||||
true,
|
||||
STALLING_DISABLED,
|
||||
force_loop_forward,
|
||||
force_loop_reverse);
|
||||
|
||||
RoutingStep(facade,
|
||||
reverse_core_heap,
|
||||
forward_core_heap,
|
||||
middle,
|
||||
weight,
|
||||
min_core_edge_offset,
|
||||
false,
|
||||
STALLING_DISABLED,
|
||||
force_loop_reverse,
|
||||
force_loop_forward);
|
||||
}
|
||||
|
||||
// No path found for both target nodes?
|
||||
if (duration_upper_bound <= weight || SPECIAL_NODEID == middle)
|
||||
{
|
||||
weight = INVALID_EDGE_WEIGHT;
|
||||
return;
|
||||
}
|
||||
|
||||
// Was a paths over one of the forward/reverse nodes not found?
|
||||
BOOST_ASSERT_MSG((SPECIAL_NODEID != middle && INVALID_EDGE_WEIGHT != weight), "no path found");
|
||||
|
||||
// we need to unpack sub path from core heaps
|
||||
if (facade->IsCoreNode(middle))
|
||||
{
|
||||
if (weight != forward_core_heap.GetKey(middle) + reverse_core_heap.GetKey(middle))
|
||||
{
|
||||
// self loop
|
||||
BOOST_ASSERT(forward_core_heap.GetData(middle).parent == middle &&
|
||||
reverse_core_heap.GetData(middle).parent == middle);
|
||||
packed_leg.push_back(middle);
|
||||
packed_leg.push_back(middle);
|
||||
}
|
||||
else
|
||||
{
|
||||
std::vector<NodeID> packed_core_leg;
|
||||
RetrievePackedPathFromHeap(
|
||||
forward_core_heap, reverse_core_heap, middle, packed_core_leg);
|
||||
BOOST_ASSERT(packed_core_leg.size() > 0);
|
||||
RetrievePackedPathFromSingleHeap(forward_heap, packed_core_leg.front(), packed_leg);
|
||||
std::reverse(packed_leg.begin(), packed_leg.end());
|
||||
packed_leg.insert(packed_leg.end(), packed_core_leg.begin(), packed_core_leg.end());
|
||||
RetrievePackedPathFromSingleHeap(reverse_heap, packed_core_leg.back(), packed_leg);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (weight != forward_heap.GetKey(middle) + reverse_heap.GetKey(middle))
|
||||
{
|
||||
// self loop
|
||||
BOOST_ASSERT(forward_heap.GetData(middle).parent == middle &&
|
||||
reverse_heap.GetData(middle).parent == middle);
|
||||
packed_leg.push_back(middle);
|
||||
packed_leg.push_back(middle);
|
||||
}
|
||||
else
|
||||
{
|
||||
RetrievePackedPathFromHeap(forward_heap, reverse_heap, middle, packed_leg);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool BasicRoutingInterface::NeedsLoopForward(const PhantomNode &source_phantom,
|
||||
const PhantomNode &target_phantom) const
|
||||
{
|
||||
return source_phantom.forward_segment_id.enabled && target_phantom.forward_segment_id.enabled &&
|
||||
source_phantom.forward_segment_id.id == target_phantom.forward_segment_id.id &&
|
||||
source_phantom.GetForwardWeightPlusOffset() >
|
||||
target_phantom.GetForwardWeightPlusOffset();
|
||||
}
|
||||
|
||||
bool BasicRoutingInterface::NeedsLoopBackwards(const PhantomNode &source_phantom,
|
||||
const PhantomNode &target_phantom) const
|
||||
{
|
||||
return source_phantom.reverse_segment_id.enabled && target_phantom.reverse_segment_id.enabled &&
|
||||
source_phantom.reverse_segment_id.id == target_phantom.reverse_segment_id.id &&
|
||||
source_phantom.GetReverseWeightPlusOffset() >
|
||||
target_phantom.GetReverseWeightPlusOffset();
|
||||
}
|
||||
|
||||
double BasicRoutingInterface::GetPathDistance(
|
||||
const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
const std::vector<NodeID> &packed_path,
|
||||
const PhantomNode &source_phantom,
|
||||
const PhantomNode &target_phantom) const
|
||||
{
|
||||
std::vector<PathData> unpacked_path;
|
||||
PhantomNodes nodes;
|
||||
nodes.source_phantom = source_phantom;
|
||||
nodes.target_phantom = target_phantom;
|
||||
UnpackPath(facade, packed_path.begin(), packed_path.end(), nodes, unpacked_path);
|
||||
|
||||
using util::coordinate_calculation::detail::DEGREE_TO_RAD;
|
||||
using util::coordinate_calculation::detail::EARTH_RADIUS;
|
||||
|
||||
double distance = 0;
|
||||
double prev_lat = static_cast<double>(toFloating(source_phantom.location.lat)) * DEGREE_TO_RAD;
|
||||
double prev_lon = static_cast<double>(toFloating(source_phantom.location.lon)) * DEGREE_TO_RAD;
|
||||
double prev_cos = std::cos(prev_lat);
|
||||
for (const auto &p : unpacked_path)
|
||||
{
|
||||
const auto current_coordinate = facade->GetCoordinateOfNode(p.turn_via_node);
|
||||
|
||||
const double current_lat =
|
||||
static_cast<double>(toFloating(current_coordinate.lat)) * DEGREE_TO_RAD;
|
||||
const double current_lon =
|
||||
static_cast<double>(toFloating(current_coordinate.lon)) * DEGREE_TO_RAD;
|
||||
const double current_cos = std::cos(current_lat);
|
||||
|
||||
const double sin_dlon = std::sin((prev_lon - current_lon) / 2.0);
|
||||
const double sin_dlat = std::sin((prev_lat - current_lat) / 2.0);
|
||||
|
||||
const double aharv = sin_dlat * sin_dlat + prev_cos * current_cos * sin_dlon * sin_dlon;
|
||||
const double charv = 2. * std::atan2(std::sqrt(aharv), std::sqrt(1.0 - aharv));
|
||||
distance += EARTH_RADIUS * charv;
|
||||
|
||||
prev_lat = current_lat;
|
||||
prev_lon = current_lon;
|
||||
prev_cos = current_cos;
|
||||
}
|
||||
|
||||
const double current_lat =
|
||||
static_cast<double>(toFloating(target_phantom.location.lat)) * DEGREE_TO_RAD;
|
||||
const double current_lon =
|
||||
static_cast<double>(toFloating(target_phantom.location.lon)) * DEGREE_TO_RAD;
|
||||
const double current_cos = std::cos(current_lat);
|
||||
|
||||
const double sin_dlon = std::sin((prev_lon - current_lon) / 2.0);
|
||||
const double sin_dlat = std::sin((prev_lat - current_lat) / 2.0);
|
||||
|
||||
const double aharv = sin_dlat * sin_dlat + prev_cos * current_cos * sin_dlon * sin_dlon;
|
||||
const double charv = 2. * std::atan2(std::sqrt(aharv), std::sqrt(1.0 - aharv));
|
||||
distance += EARTH_RADIUS * charv;
|
||||
|
||||
return distance;
|
||||
}
|
||||
|
||||
// Requires the heaps for be empty
|
||||
// If heaps should be adjusted to be initialized outside of this function,
|
||||
// the addition of force_loop parameters might be required
|
||||
double BasicRoutingInterface::GetNetworkDistanceWithCore(
|
||||
const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
SearchEngineData::QueryHeap &forward_heap,
|
||||
SearchEngineData::QueryHeap &reverse_heap,
|
||||
SearchEngineData::QueryHeap &forward_core_heap,
|
||||
SearchEngineData::QueryHeap &reverse_core_heap,
|
||||
const PhantomNode &source_phantom,
|
||||
const PhantomNode &target_phantom,
|
||||
int duration_upper_bound) const
|
||||
{
|
||||
BOOST_ASSERT(forward_heap.Empty());
|
||||
BOOST_ASSERT(reverse_heap.Empty());
|
||||
|
||||
if (source_phantom.forward_segment_id.enabled)
|
||||
{
|
||||
forward_heap.Insert(source_phantom.forward_segment_id.id,
|
||||
-source_phantom.GetForwardWeightPlusOffset(),
|
||||
source_phantom.forward_segment_id.id);
|
||||
}
|
||||
if (source_phantom.reverse_segment_id.enabled)
|
||||
{
|
||||
forward_heap.Insert(source_phantom.reverse_segment_id.id,
|
||||
-source_phantom.GetReverseWeightPlusOffset(),
|
||||
source_phantom.reverse_segment_id.id);
|
||||
}
|
||||
|
||||
if (target_phantom.forward_segment_id.enabled)
|
||||
{
|
||||
reverse_heap.Insert(target_phantom.forward_segment_id.id,
|
||||
target_phantom.GetForwardWeightPlusOffset(),
|
||||
target_phantom.forward_segment_id.id);
|
||||
}
|
||||
if (target_phantom.reverse_segment_id.enabled)
|
||||
{
|
||||
reverse_heap.Insert(target_phantom.reverse_segment_id.id,
|
||||
target_phantom.GetReverseWeightPlusOffset(),
|
||||
target_phantom.reverse_segment_id.id);
|
||||
}
|
||||
|
||||
const bool constexpr DO_NOT_FORCE_LOOPS =
|
||||
false; // prevents forcing of loops, since offsets are set correctly
|
||||
|
||||
int duration = INVALID_EDGE_WEIGHT;
|
||||
std::vector<NodeID> packed_path;
|
||||
SearchWithCore(facade,
|
||||
forward_heap,
|
||||
reverse_heap,
|
||||
forward_core_heap,
|
||||
reverse_core_heap,
|
||||
duration,
|
||||
packed_path,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
duration_upper_bound);
|
||||
|
||||
double distance = std::numeric_limits<double>::max();
|
||||
if (duration != INVALID_EDGE_WEIGHT)
|
||||
{
|
||||
return GetPathDistance(facade, packed_path, source_phantom, target_phantom);
|
||||
}
|
||||
return distance;
|
||||
}
|
||||
|
||||
// Requires the heaps for be empty
|
||||
// If heaps should be adjusted to be initialized outside of this function,
|
||||
// the addition of force_loop parameters might be required
|
||||
double BasicRoutingInterface::GetNetworkDistance(
|
||||
const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
SearchEngineData::QueryHeap &forward_heap,
|
||||
SearchEngineData::QueryHeap &reverse_heap,
|
||||
const PhantomNode &source_phantom,
|
||||
const PhantomNode &target_phantom,
|
||||
int duration_upper_bound) const
|
||||
{
|
||||
BOOST_ASSERT(forward_heap.Empty());
|
||||
BOOST_ASSERT(reverse_heap.Empty());
|
||||
|
||||
if (source_phantom.forward_segment_id.enabled)
|
||||
{
|
||||
forward_heap.Insert(source_phantom.forward_segment_id.id,
|
||||
-source_phantom.GetForwardWeightPlusOffset(),
|
||||
source_phantom.forward_segment_id.id);
|
||||
}
|
||||
if (source_phantom.reverse_segment_id.enabled)
|
||||
{
|
||||
forward_heap.Insert(source_phantom.reverse_segment_id.id,
|
||||
-source_phantom.GetReverseWeightPlusOffset(),
|
||||
source_phantom.reverse_segment_id.id);
|
||||
}
|
||||
|
||||
if (target_phantom.forward_segment_id.enabled)
|
||||
{
|
||||
reverse_heap.Insert(target_phantom.forward_segment_id.id,
|
||||
target_phantom.GetForwardWeightPlusOffset(),
|
||||
target_phantom.forward_segment_id.id);
|
||||
}
|
||||
if (target_phantom.reverse_segment_id.enabled)
|
||||
{
|
||||
reverse_heap.Insert(target_phantom.reverse_segment_id.id,
|
||||
target_phantom.GetReverseWeightPlusOffset(),
|
||||
target_phantom.reverse_segment_id.id);
|
||||
}
|
||||
|
||||
const bool constexpr DO_NOT_FORCE_LOOPS =
|
||||
false; // prevents forcing of loops, since offsets are set correctly
|
||||
|
||||
int duration = INVALID_EDGE_WEIGHT;
|
||||
std::vector<NodeID> packed_path;
|
||||
Search(facade,
|
||||
forward_heap,
|
||||
reverse_heap,
|
||||
duration,
|
||||
packed_path,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
DO_NOT_FORCE_LOOPS,
|
||||
duration_upper_bound);
|
||||
|
||||
if (duration == INVALID_EDGE_WEIGHT)
|
||||
{
|
||||
return std::numeric_limits<double>::max();
|
||||
}
|
||||
|
||||
return GetPathDistance(facade, packed_path, source_phantom, target_phantom);
|
||||
}
|
||||
|
||||
} // namespace routing_algorithms
|
||||
} // namespace engine
|
||||
} // namespace osrm
|
499
src/engine/routing_algorithms/shortest_path.cpp
Normal file
499
src/engine/routing_algorithms/shortest_path.cpp
Normal file
@ -0,0 +1,499 @@
|
||||
#include "engine/routing_algorithms/shortest_path.hpp"
|
||||
|
||||
namespace osrm
|
||||
{
|
||||
namespace engine
|
||||
{
|
||||
namespace routing_algorithms
|
||||
{
|
||||
|
||||
// allows a uturn at the target_phantom
|
||||
// searches source forward/reverse -> target forward/reverse
|
||||
void ShortestPathRouting::SearchWithUTurn(
|
||||
const std::shared_ptr<const datafacade::BaseDataFacade> facade,
|
||||
QueryHeap &forward_heap,
|
||||
QueryHeap &reverse_heap,
|
||||
QueryHeap &forward_core_heap,
|
||||
QueryHeap &reverse_core_heap,
|
||||
const bool search_from_forward_node,
|
||||
const bool search_from_reverse_node,
|
||||
const bool search_to_forward_node,
|
||||
const bool search_to_reverse_node,
|
||||
const PhantomNode &source_phantom,
|
||||
const PhantomNode &target_phantom,
|
||||
const int total_weight_to_forward,
|
||||
const int total_weight_to_reverse,
|
||||
int &new_total_weight,
|
||||
std::vector<NodeID> &leg_packed_path) const
|
||||
{
|
||||
forward_heap.Clear();
|
||||
reverse_heap.Clear();
|
||||
if (search_from_forward_node)
|
||||
{
|
||||
forward_heap.Insert(source_phantom.forward_segment_id.id,
|
||||
-source_phantom.GetForwardWeightPlusOffset(),
|
||||
source_phantom.forward_segment_id.id);
|
||||
}
|
||||
if (search_from_reverse_node)
|
||||
{
|
||||
forward_heap.Insert(source_phantom.reverse_segment_id.id,
|
||||
-source_phantom.GetReverseWeightPlusOffset(),
|
||||
source_phantom.reverse_segment_id.id);
|
||||
}
|
||||
if (search_to_forward_node)
|
||||
{
|
||||
reverse_heap.Insert(target_phantom.forward_segment_id.id,
|
||||
target_phantom.GetForwardWeightPlusOffset(),
|
||||
target_phantom.forward_segment_id.id);
|
||||
}
|
||||
if (search_to_reverse_node)
|
||||
{
|
||||
reverse_heap.Insert(target_phantom.reverse_segment_id.id,
|
||||
target_phantom.GetReverseWeightPlusOffset(),
|
||||
target_phantom.reverse_segment_id.id);
|
||||
}
|
||||
|
||||
BOOST_ASSERT(forward_heap.Size() > 0);
|
||||
BOOST_ASSERT(reverse_heap.Size() > 0);
|
||||
|
||||
// this is only relevent if source and target are on the same compressed edge
|
||||
auto is_oneway_source = !(search_from_forward_node && search_from_reverse_node);
|
||||
auto is_oneway_target = !(search_to_forward_node && search_to_reverse_node);
|
||||
// we only enable loops here if we can't search from forward to backward node
|
||||
auto needs_loop_forwad =
|
||||
is_oneway_source && super::NeedsLoopForward(source_phantom, target_phantom);
|
||||
auto needs_loop_backwards =
|
||||
is_oneway_target && super::NeedsLoopBackwards(source_phantom, target_phantom);
|
||||
if (facade->GetCoreSize() > 0)
|
||||
{
|
||||
forward_core_heap.Clear();
|
||||
reverse_core_heap.Clear();
|
||||
BOOST_ASSERT(forward_core_heap.Size() == 0);
|
||||
BOOST_ASSERT(reverse_core_heap.Size() == 0);
|
||||
super::SearchWithCore(facade,
|
||||
forward_heap,
|
||||
reverse_heap,
|
||||
forward_core_heap,
|
||||
reverse_core_heap,
|
||||
new_total_weight,
|
||||
leg_packed_path,
|
||||
needs_loop_forwad,
|
||||
needs_loop_backwards);
|
||||
}
|
||||
else
|
||||
{
|
||||
super::Search(facade,
|
||||
forward_heap,
|
||||
reverse_heap,
|
||||
new_total_weight,
|
||||
leg_packed_path,
|
||||
needs_loop_forwad,
|
||||
needs_loop_backwards);
|
||||
}
|
||||
// if no route is found between two parts of the via-route, the entire route becomes
|
||||
// invalid. Adding to invalid edge weight sadly doesn't return an invalid edge weight. Here
|
||||
// we prevent the possible overflow, faking the addition of infinity + x == infinity
|
||||
if (new_total_weight != INVALID_EDGE_WEIGHT)
|
||||
new_total_weight += std::min(total_weight_to_forward, total_weight_to_reverse);
|
||||
}
|
||||
|
||||
// searches shortest path between:
// source forward/reverse -> target forward
// source forward/reverse -> target reverse
//
// Unlike SearchWithUTurn, the two target directions are searched independently so the
// caller can keep both candidate legs (no u-turn allowed at the target). Each branch
// seeds the forward heap with the accumulated weight of the corresponding incoming leg
// (total_weight_to_forward / total_weight_to_reverse), so the resulting new totals are
// already cumulative over the via route. Outputs remain INVALID_EDGE_WEIGHT / empty
// when the corresponding search finds no route.
void ShortestPathRouting::Search(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
                                 QueryHeap &forward_heap,
                                 QueryHeap &reverse_heap,
                                 QueryHeap &forward_core_heap,
                                 QueryHeap &reverse_core_heap,
                                 const bool search_from_forward_node,
                                 const bool search_from_reverse_node,
                                 const bool search_to_forward_node,
                                 const bool search_to_reverse_node,
                                 const PhantomNode &source_phantom,
                                 const PhantomNode &target_phantom,
                                 const int total_weight_to_forward,
                                 const int total_weight_to_reverse,
                                 int &new_total_weight_to_forward,
                                 int &new_total_weight_to_reverse,
                                 std::vector<NodeID> &leg_packed_path_forward,
                                 std::vector<NodeID> &leg_packed_path_reverse) const
{
    // First search: source (either direction) -> target forward node.
    if (search_to_forward_node)
    {
        forward_heap.Clear();
        reverse_heap.Clear();
        reverse_heap.Insert(target_phantom.forward_segment_id.id,
                            target_phantom.GetForwardWeightPlusOffset(),
                            target_phantom.forward_segment_id.id);

        // Source insertions include the accumulated leg weight so totals stay cumulative;
        // the phantom offset is subtracted to start "upstream" of the phantom location.
        if (search_from_forward_node)
        {
            forward_heap.Insert(source_phantom.forward_segment_id.id,
                                total_weight_to_forward -
                                    source_phantom.GetForwardWeightPlusOffset(),
                                source_phantom.forward_segment_id.id);
        }
        if (search_from_reverse_node)
        {
            forward_heap.Insert(source_phantom.reverse_segment_id.id,
                                total_weight_to_reverse -
                                    source_phantom.GetReverseWeightPlusOffset(),
                                source_phantom.reverse_segment_id.id);
        }
        BOOST_ASSERT(forward_heap.Size() > 0);
        BOOST_ASSERT(reverse_heap.Size() > 0);

        // Core-CH variant when the graph has a core; plain bidirectional search otherwise.
        if (facade->GetCoreSize() > 0)
        {
            forward_core_heap.Clear();
            reverse_core_heap.Clear();
            BOOST_ASSERT(forward_core_heap.Size() == 0);
            BOOST_ASSERT(reverse_core_heap.Size() == 0);
            super::SearchWithCore(facade,
                                  forward_heap,
                                  reverse_heap,
                                  forward_core_heap,
                                  reverse_core_heap,
                                  new_total_weight_to_forward,
                                  leg_packed_path_forward,
                                  super::NeedsLoopForward(source_phantom, target_phantom),
                                  DO_NOT_FORCE_LOOP);
        }
        else
        {
            super::Search(facade,
                          forward_heap,
                          reverse_heap,
                          new_total_weight_to_forward,
                          leg_packed_path_forward,
                          super::NeedsLoopForward(source_phantom, target_phantom),
                          DO_NOT_FORCE_LOOP);
        }
    }

    // Second search: source (either direction) -> target reverse node. Mirrors the
    // branch above with the reverse target segment and the backwards loop check.
    if (search_to_reverse_node)
    {
        forward_heap.Clear();
        reverse_heap.Clear();
        reverse_heap.Insert(target_phantom.reverse_segment_id.id,
                            target_phantom.GetReverseWeightPlusOffset(),
                            target_phantom.reverse_segment_id.id);
        if (search_from_forward_node)
        {
            forward_heap.Insert(source_phantom.forward_segment_id.id,
                                total_weight_to_forward -
                                    source_phantom.GetForwardWeightPlusOffset(),
                                source_phantom.forward_segment_id.id);
        }
        if (search_from_reverse_node)
        {
            forward_heap.Insert(source_phantom.reverse_segment_id.id,
                                total_weight_to_reverse -
                                    source_phantom.GetReverseWeightPlusOffset(),
                                source_phantom.reverse_segment_id.id);
        }
        BOOST_ASSERT(forward_heap.Size() > 0);
        BOOST_ASSERT(reverse_heap.Size() > 0);
        if (facade->GetCoreSize() > 0)
        {
            forward_core_heap.Clear();
            reverse_core_heap.Clear();
            BOOST_ASSERT(forward_core_heap.Size() == 0);
            BOOST_ASSERT(reverse_core_heap.Size() == 0);
            super::SearchWithCore(facade,
                                  forward_heap,
                                  reverse_heap,
                                  forward_core_heap,
                                  reverse_core_heap,
                                  new_total_weight_to_reverse,
                                  leg_packed_path_reverse,
                                  DO_NOT_FORCE_LOOP,
                                  super::NeedsLoopBackwards(source_phantom, target_phantom));
        }
        else
        {
            super::Search(facade,
                          forward_heap,
                          reverse_heap,
                          new_total_weight_to_reverse,
                          leg_packed_path_reverse,
                          DO_NOT_FORCE_LOOP,
                          super::NeedsLoopBackwards(source_phantom, target_phantom));
        }
    }
}
|
||||
|
||||
// Unpacks each compressed leg of the winning packed path into raw_route_data and
// records, per leg, whether the source/target phantom was traversed against its
// forward direction. packed_leg_begin holds one start offset per leg plus a final
// sentinel equal to total_packed_path.size().
void ShortestPathRouting::UnpackLegs(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
                                     const std::vector<PhantomNodes> &phantom_nodes_vector,
                                     const std::vector<NodeID> &total_packed_path,
                                     const std::vector<std::size_t> &packed_leg_begin,
                                     const int shortest_path_length,
                                     InternalRouteResult &raw_route_data) const
{
    const auto number_of_legs = packed_leg_begin.size() - 1;
    raw_route_data.unpacked_path_segments.resize(number_of_legs);

    raw_route_data.shortest_path_length = shortest_path_length;

    for (const auto leg : util::irange<std::size_t>(0UL, number_of_legs))
    {
        const auto leg_begin = total_packed_path.begin() + packed_leg_begin[leg];
        const auto leg_end = total_packed_path.begin() + packed_leg_begin[leg + 1];
        const auto &phantom_pair = phantom_nodes_vector[leg];

        super::UnpackPath(
            facade, leg_begin, leg_end, phantom_pair, raw_route_data.unpacked_path_segments[leg]);

        // A leg that does not start (resp. end) on the forward segment id of its
        // phantom ran over the reverse segment instead.
        const bool source_reversed =
            *leg_begin != phantom_pair.source_phantom.forward_segment_id.id;
        const bool target_reversed =
            *std::prev(leg_end) != phantom_pair.target_phantom.forward_segment_id.id;
        raw_route_data.source_traversed_in_reverse.push_back(source_reversed);
        raw_route_data.target_traversed_in_reverse.push_back(target_reversed);
    }
}
|
||||
|
||||
// Entry point: computes the shortest route through the ordered list of via pairs in
// phantom_nodes_vector. Implements a dynamic program over the legs, tracking (at most)
// two candidate states per via — arriving on the target's forward segment and on its
// reverse segment — and finally unpacks the cheaper of the two into raw_route_data.
// On failure, shortest_path_length / alternative_path_length are set to
// INVALID_EDGE_WEIGHT and the function returns early.
void ShortestPathRouting::operator()(const std::shared_ptr<const datafacade::BaseDataFacade> facade,
                                     const std::vector<PhantomNodes> &phantom_nodes_vector,
                                     const boost::optional<bool> continue_straight_at_waypoint,
                                     InternalRouteResult &raw_route_data) const
{
    // Per-request override wins; otherwise fall back to the profile default.
    const bool allow_uturn_at_waypoint =
        !(continue_straight_at_waypoint ? *continue_straight_at_waypoint
                                        : facade->GetContinueStraightDefault());

    engine_working_data.InitializeOrClearFirstThreadLocalStorage(facade->GetNumberOfNodes());
    engine_working_data.InitializeOrClearSecondThreadLocalStorage(facade->GetNumberOfNodes());

    QueryHeap &forward_heap = *(engine_working_data.forward_heap_1);
    QueryHeap &reverse_heap = *(engine_working_data.reverse_heap_1);
    QueryHeap &forward_core_heap = *(engine_working_data.forward_heap_2);
    QueryHeap &reverse_core_heap = *(engine_working_data.reverse_heap_2);

    // Cumulative weights of the best route reaching the current via's forward/reverse node.
    int total_weight_to_forward = 0;
    int total_weight_to_reverse = 0;
    bool search_from_forward_node =
        phantom_nodes_vector.front().source_phantom.forward_segment_id.enabled;
    bool search_from_reverse_node =
        phantom_nodes_vector.front().source_phantom.reverse_segment_id.enabled;

    std::vector<NodeID> prev_packed_leg_to_forward;
    std::vector<NodeID> prev_packed_leg_to_reverse;

    // Concatenated packed paths per candidate state, plus per-leg start offsets.
    std::vector<NodeID> total_packed_path_to_forward;
    std::vector<std::size_t> packed_leg_to_forward_begin;
    std::vector<NodeID> total_packed_path_to_reverse;
    std::vector<std::size_t> packed_leg_to_reverse_begin;

    std::size_t current_leg = 0;
    // this implements a dynamic program that finds the shortest route through
    // a list of vias
    for (const auto &phantom_node_pair : phantom_nodes_vector)
    {
        int new_total_weight_to_forward = INVALID_EDGE_WEIGHT;
        int new_total_weight_to_reverse = INVALID_EDGE_WEIGHT;

        std::vector<NodeID> packed_leg_to_forward;
        std::vector<NodeID> packed_leg_to_reverse;

        const auto &source_phantom = phantom_node_pair.source_phantom;
        const auto &target_phantom = phantom_node_pair.target_phantom;

        bool search_to_forward_node = target_phantom.forward_segment_id.enabled;
        bool search_to_reverse_node = target_phantom.reverse_segment_id.enabled;

        BOOST_ASSERT(!search_from_forward_node || source_phantom.forward_segment_id.enabled);
        BOOST_ASSERT(!search_from_reverse_node || source_phantom.reverse_segment_id.enabled);

        BOOST_ASSERT(search_from_forward_node || search_from_reverse_node);

        if (search_to_reverse_node || search_to_forward_node)
        {
            if (allow_uturn_at_waypoint)
            {
                // Single search that may end on either target direction.
                SearchWithUTurn(facade,
                                forward_heap,
                                reverse_heap,
                                forward_core_heap,
                                reverse_core_heap,
                                search_from_forward_node,
                                search_from_reverse_node,
                                search_to_forward_node,
                                search_to_reverse_node,
                                source_phantom,
                                target_phantom,
                                total_weight_to_forward,
                                total_weight_to_reverse,
                                new_total_weight_to_forward,
                                packed_leg_to_forward);
                // if only the reverse node is valid (e.g. when using the match plugin) we
                // actually need to move
                if (!target_phantom.forward_segment_id.enabled)
                {
                    BOOST_ASSERT(target_phantom.reverse_segment_id.enabled);
                    new_total_weight_to_reverse = new_total_weight_to_forward;
                    packed_leg_to_reverse = std::move(packed_leg_to_forward);
                    new_total_weight_to_forward = INVALID_EDGE_WEIGHT;
                }
                else if (target_phantom.reverse_segment_id.enabled)
                {
                    // Both target directions enabled: duplicate the result into both states.
                    new_total_weight_to_reverse = new_total_weight_to_forward;
                    packed_leg_to_reverse = packed_leg_to_forward;
                }
            }
            else
            {
                // Two independent searches keep the forward/reverse target states separate.
                Search(facade,
                       forward_heap,
                       reverse_heap,
                       forward_core_heap,
                       reverse_core_heap,
                       search_from_forward_node,
                       search_from_reverse_node,
                       search_to_forward_node,
                       search_to_reverse_node,
                       source_phantom,
                       target_phantom,
                       total_weight_to_forward,
                       total_weight_to_reverse,
                       new_total_weight_to_forward,
                       new_total_weight_to_reverse,
                       packed_leg_to_forward,
                       packed_leg_to_reverse);
            }
        }

        // No path found for both target nodes?
        if ((INVALID_EDGE_WEIGHT == new_total_weight_to_forward) &&
            (INVALID_EDGE_WEIGHT == new_total_weight_to_reverse))
        {
            raw_route_data.shortest_path_length = INVALID_EDGE_WEIGHT;
            raw_route_data.alternative_path_length = INVALID_EDGE_WEIGHT;
            return;
        }

        // we need to figure out how the new legs connect to the previous ones
        if (current_leg > 0)
        {
            // Which previous state (forward/reverse arrival at the source) does each new
            // leg start from? Determined by the first node id of the new packed leg.
            bool forward_to_forward =
                (new_total_weight_to_forward != INVALID_EDGE_WEIGHT) &&
                packed_leg_to_forward.front() == source_phantom.forward_segment_id.id;
            bool reverse_to_forward =
                (new_total_weight_to_forward != INVALID_EDGE_WEIGHT) &&
                packed_leg_to_forward.front() == source_phantom.reverse_segment_id.id;
            bool forward_to_reverse =
                (new_total_weight_to_reverse != INVALID_EDGE_WEIGHT) &&
                packed_leg_to_reverse.front() == source_phantom.forward_segment_id.id;
            bool reverse_to_reverse =
                (new_total_weight_to_reverse != INVALID_EDGE_WEIGHT) &&
                packed_leg_to_reverse.front() == source_phantom.reverse_segment_id.id;

            BOOST_ASSERT(!forward_to_forward || !reverse_to_forward);
            BOOST_ASSERT(!forward_to_reverse || !reverse_to_reverse);

            // in this case we always need to copy
            if (forward_to_forward && forward_to_reverse)
            {
                // in this case we copy the path leading to the source forward node
                // and change the case
                total_packed_path_to_reverse = total_packed_path_to_forward;
                packed_leg_to_reverse_begin = packed_leg_to_forward_begin;
                forward_to_reverse = false;
                reverse_to_reverse = true;
            }
            else if (reverse_to_forward && reverse_to_reverse)
            {
                total_packed_path_to_forward = total_packed_path_to_reverse;
                packed_leg_to_forward_begin = packed_leg_to_reverse_begin;
                reverse_to_forward = false;
                forward_to_forward = true;
            }
            BOOST_ASSERT(!forward_to_forward || !forward_to_reverse);
            BOOST_ASSERT(!reverse_to_forward || !reverse_to_reverse);

            // in this case we just need to swap to regain the correct mapping
            if (reverse_to_forward || forward_to_reverse)
            {
                total_packed_path_to_forward.swap(total_packed_path_to_reverse);
                packed_leg_to_forward_begin.swap(packed_leg_to_reverse_begin);
            }
        }

        // Append the new leg to the forward-state path, or kill that state if unreachable.
        if (new_total_weight_to_forward != INVALID_EDGE_WEIGHT)
        {
            BOOST_ASSERT(target_phantom.forward_segment_id.enabled);

            packed_leg_to_forward_begin.push_back(total_packed_path_to_forward.size());
            total_packed_path_to_forward.insert(total_packed_path_to_forward.end(),
                                                packed_leg_to_forward.begin(),
                                                packed_leg_to_forward.end());
            search_from_forward_node = true;
        }
        else
        {
            total_packed_path_to_forward.clear();
            packed_leg_to_forward_begin.clear();
            search_from_forward_node = false;
        }

        // Same bookkeeping for the reverse-state path.
        if (new_total_weight_to_reverse != INVALID_EDGE_WEIGHT)
        {
            BOOST_ASSERT(target_phantom.reverse_segment_id.enabled);

            packed_leg_to_reverse_begin.push_back(total_packed_path_to_reverse.size());
            total_packed_path_to_reverse.insert(total_packed_path_to_reverse.end(),
                                                packed_leg_to_reverse.begin(),
                                                packed_leg_to_reverse.end());
            search_from_reverse_node = true;
        }
        else
        {
            total_packed_path_to_reverse.clear();
            packed_leg_to_reverse_begin.clear();
            search_from_reverse_node = false;
        }

        prev_packed_leg_to_forward = std::move(packed_leg_to_forward);
        prev_packed_leg_to_reverse = std::move(packed_leg_to_reverse);

        total_weight_to_forward = new_total_weight_to_forward;
        total_weight_to_reverse = new_total_weight_to_reverse;

        ++current_leg;
    }

    BOOST_ASSERT(total_weight_to_forward != INVALID_EDGE_WEIGHT ||
                 total_weight_to_reverse != INVALID_EDGE_WEIGHT);

    // We make sure the fastest route is always in packed_legs_to_forward
    if (total_weight_to_forward > total_weight_to_reverse)
    {
        // insert sentinel
        packed_leg_to_reverse_begin.push_back(total_packed_path_to_reverse.size());
        BOOST_ASSERT(packed_leg_to_reverse_begin.size() == phantom_nodes_vector.size() + 1);

        UnpackLegs(facade,
                   phantom_nodes_vector,
                   total_packed_path_to_reverse,
                   packed_leg_to_reverse_begin,
                   total_weight_to_reverse,
                   raw_route_data);
    }
    else
    {
        // insert sentinel
        packed_leg_to_forward_begin.push_back(total_packed_path_to_forward.size());
        BOOST_ASSERT(packed_leg_to_forward_begin.size() == phantom_nodes_vector.size() + 1);

        UnpackLegs(facade,
                   phantom_nodes_vector,
                   total_packed_path_to_forward,
                   packed_leg_to_forward_begin,
                   total_weight_to_forward,
                   raw_route_data);
    }
}
|
||||
|
||||
} // namespace routing_algorithms
|
||||
} // namespace engine
|
||||
} // namespace osrm
|
Loading…
Reference in New Issue
Block a user