refactor graph contractor for readability
move to a dedicated dijkstra, separate code and header reduce heap size, we don't use more than 2000 nodes, so why allocate 170k?
This commit is contained in:
parent
fce3bb180c
commit
226ee62981
@ -78,20 +78,19 @@ class Contractor
|
||||
private:
|
||||
ContractorConfig config;
|
||||
|
||||
EdgeID
|
||||
LoadEdgeExpandedGraph(const std::string &edge_based_graph_path,
|
||||
util::DeallocatingVector<extractor::EdgeBasedEdge> &edge_based_edge_list,
|
||||
std::vector<EdgeWeight> &node_weights,
|
||||
const std::string &edge_segment_lookup_path,
|
||||
const std::string &edge_penalty_path,
|
||||
const std::vector<std::string> &segment_speed_path,
|
||||
const std::vector<std::string> &turn_penalty_path,
|
||||
const std::string &nodes_filename,
|
||||
const std::string &geometry_filename,
|
||||
const std::string &datasource_names_filename,
|
||||
const std::string &datasource_indexes_filename,
|
||||
const std::string &rtree_leaf_filename,
|
||||
const double log_edge_updates_factor);
|
||||
EdgeID LoadEdgeExpandedGraph(const std::string &edge_based_graph_path,
|
||||
std::vector<extractor::EdgeBasedEdge> &edge_based_edge_list,
|
||||
std::vector<EdgeWeight> &node_weights,
|
||||
const std::string &edge_segment_lookup_path,
|
||||
const std::string &edge_penalty_path,
|
||||
const std::vector<std::string> &segment_speed_path,
|
||||
const std::vector<std::string> &turn_penalty_path,
|
||||
const std::string &nodes_filename,
|
||||
const std::string &geometry_filename,
|
||||
const std::string &datasource_names_filename,
|
||||
const std::string &datasource_indexes_filename,
|
||||
const std::string &rtree_leaf_filename,
|
||||
const double log_edge_updates_factor);
|
||||
};
|
||||
}
|
||||
}
|
||||
|
50
include/contractor/contractor_dijkstra.hpp
Normal file
50
include/contractor/contractor_dijkstra.hpp
Normal file
@ -0,0 +1,50 @@
|
||||
#ifndef OSRM_CONTRACTOR_DIJKSTRA_HPP
|
||||
#define OSRM_CONTRACTOR_DIJKSTRA_HPP
|
||||
|
||||
#include "contractor/contractor_graph.hpp"
|
||||
#include "contractor/contractor_heap.hpp"
|
||||
#include "util/typedefs.hpp"
|
||||
|
||||
#include <cstddef>
|
||||
|
||||
namespace osrm
|
||||
{
|
||||
namespace contractor
|
||||
{
|
||||
|
||||
// allow access to the heap itself, add Dijkstra functionality on top
|
||||
class ContractorDijkstra
|
||||
{
|
||||
public:
|
||||
ContractorDijkstra(std::size_t heap_size);
|
||||
|
||||
// search the graph up
|
||||
void Run(const unsigned number_of_targets,
|
||||
const int node_limit,
|
||||
const int weight_limit,
|
||||
const NodeID forbidden_node,
|
||||
const ContractorGraph &graph);
|
||||
|
||||
// adaption of the heap interface
|
||||
void Clear();
|
||||
bool WasInserted(const NodeID node) const;
|
||||
void Insert(const NodeID node,
|
||||
const ContractorHeap::WeightType weight,
|
||||
const ContractorHeap::DataType &data);
|
||||
|
||||
// cannot be const due to node-hash access in the binary heap :(
|
||||
ContractorHeap::WeightType GetKey(const NodeID node);
|
||||
|
||||
private:
|
||||
void RelaxNode(const NodeID node,
|
||||
const int node_weight,
|
||||
const NodeID forbidden_node,
|
||||
const ContractorGraph &graph);
|
||||
|
||||
ContractorHeap heap;
|
||||
};
|
||||
|
||||
} // namespace contractor
|
||||
} // namespace osrm
|
||||
|
||||
#endif // OSRM_CONTRACTOR_DIJKSTRA_HPP
|
44
include/contractor/contractor_graph.hpp
Normal file
44
include/contractor/contractor_graph.hpp
Normal file
@ -0,0 +1,44 @@
|
||||
#ifndef OSRM_CONTRACTOR_CONTRACTOR_GRAPH_HPP_
|
||||
#define OSRM_CONTRACTOR_CONTRACTOR_GRAPH_HPP_
|
||||
|
||||
#include "util/dynamic_graph.hpp"
|
||||
#include <algorithm>
|
||||
|
||||
namespace osrm
|
||||
{
|
||||
namespace contractor
|
||||
{
|
||||
|
||||
struct ContractorEdgeData
|
||||
{
|
||||
ContractorEdgeData()
|
||||
: weight(0), id(0), originalEdges(0), shortcut(0), forward(0), backward(0),
|
||||
is_original_via_node_ID(false)
|
||||
{
|
||||
}
|
||||
ContractorEdgeData(unsigned weight,
|
||||
unsigned original_edges,
|
||||
unsigned id,
|
||||
bool shortcut,
|
||||
bool forward,
|
||||
bool backward)
|
||||
: weight(weight), id(id), originalEdges(std::min((unsigned)1 << 28, original_edges)),
|
||||
shortcut(shortcut), forward(forward), backward(backward), is_original_via_node_ID(false)
|
||||
{
|
||||
}
|
||||
unsigned weight;
|
||||
unsigned id;
|
||||
unsigned originalEdges : 28;
|
||||
bool shortcut : 1;
|
||||
bool forward : 1;
|
||||
bool backward : 1;
|
||||
bool is_original_via_node_ID : 1;
|
||||
};
|
||||
|
||||
using ContractorGraph = util::DynamicGraph<ContractorEdgeData>;
|
||||
using ContractorEdge = ContractorGraph::InputEdge;
|
||||
|
||||
} // namespace contractor
|
||||
} // namespace osrm
|
||||
|
||||
#endif // OSRM_CONTRACTOR_CONTRACTOR_GRAPH_HPP_
|
27
include/contractor/contractor_heap.hpp
Normal file
27
include/contractor/contractor_heap.hpp
Normal file
@ -0,0 +1,27 @@
|
||||
#ifndef OSRM_CONTRACTOR_CONTRACTOR_HEAP_HPP_
|
||||
#define OSRM_CONTRACTOR_CONTRACTOR_HEAP_HPP_
|
||||
|
||||
#include "util/binary_heap.hpp"
|
||||
#include "util/typedefs.hpp"
|
||||
#include "util/xor_fast_hash_storage.hpp"
|
||||
|
||||
namespace osrm
|
||||
{
|
||||
namespace contractor
|
||||
{
|
||||
struct ContractorHeapData
|
||||
{
|
||||
ContractorHeapData() {}
|
||||
ContractorHeapData(short hop_, bool target_) : hop(hop_), target(target_) {}
|
||||
|
||||
short hop = 0;
|
||||
bool target = false;
|
||||
};
|
||||
|
||||
using ContractorHeap = util::
|
||||
BinaryHeap<NodeID, NodeID, int, ContractorHeapData, util::XORFastHashStorage<NodeID, NodeID>>;
|
||||
|
||||
} // namespace contractor
|
||||
} // namespace osrm
|
||||
|
||||
#endif // OSRM_CONTRACTOR_CONTRACTOR_HEAP_HPP_
|
@ -1,17 +1,16 @@
|
||||
#ifndef GRAPH_CONTRACTOR_HPP
|
||||
#define GRAPH_CONTRACTOR_HPP
|
||||
#ifndef OSRM_CONTRACTOR_GRAPH_CONTRACTOR_HPP
|
||||
#define OSRM_CONTRACTOR_GRAPH_CONTRACTOR_HPP
|
||||
|
||||
#include "contractor/contractor_dijkstra.hpp"
|
||||
#include "contractor/contractor_graph.hpp"
|
||||
#include "contractor/query_edge.hpp"
|
||||
#include "util/binary_heap.hpp"
|
||||
#include "util/deallocating_vector.hpp"
|
||||
#include "util/dynamic_graph.hpp"
|
||||
#include "util/integer_range.hpp"
|
||||
#include "util/log.hpp"
|
||||
#include "util/percent.hpp"
|
||||
#include "util/timing_util.hpp"
|
||||
#include "util/typedefs.hpp"
|
||||
#include "util/xor_fast_hash.hpp"
|
||||
#include "util/xor_fast_hash_storage.hpp"
|
||||
|
||||
#include <boost/assert.hpp>
|
||||
|
||||
@ -34,59 +33,12 @@ namespace contractor
|
||||
class GraphContractor
|
||||
{
|
||||
private:
|
||||
struct ContractorEdgeData
|
||||
{
|
||||
ContractorEdgeData()
|
||||
: weight(0), id(0), originalEdges(0), shortcut(0), forward(0), backward(0),
|
||||
is_original_via_node_ID(false)
|
||||
{
|
||||
}
|
||||
ContractorEdgeData(unsigned weight,
|
||||
unsigned original_edges,
|
||||
unsigned id,
|
||||
bool shortcut,
|
||||
bool forward,
|
||||
bool backward)
|
||||
: weight(weight), id(id), originalEdges(std::min((unsigned)1 << 28, original_edges)),
|
||||
shortcut(shortcut), forward(forward), backward(backward),
|
||||
is_original_via_node_ID(false)
|
||||
{
|
||||
}
|
||||
unsigned weight;
|
||||
unsigned id;
|
||||
unsigned originalEdges : 28;
|
||||
bool shortcut : 1;
|
||||
bool forward : 1;
|
||||
bool backward : 1;
|
||||
bool is_original_via_node_ID : 1;
|
||||
} data;
|
||||
|
||||
struct ContractorHeapData
|
||||
{
|
||||
ContractorHeapData() {}
|
||||
ContractorHeapData(short hop_, bool target_) : hop(hop_), target(target_) {}
|
||||
|
||||
short hop = 0;
|
||||
bool target = false;
|
||||
};
|
||||
|
||||
using ContractorGraph = util::DynamicGraph<ContractorEdgeData>;
|
||||
// using ContractorHeap = util::BinaryHeap<NodeID, NodeID, int, ContractorHeapData,
|
||||
// ArrayStorage<NodeID, NodeID>
|
||||
// >;
|
||||
using ContractorHeap = util::BinaryHeap<NodeID,
|
||||
NodeID,
|
||||
int,
|
||||
ContractorHeapData,
|
||||
util::XORFastHashStorage<NodeID, NodeID>>;
|
||||
using ContractorEdge = ContractorGraph::InputEdge;
|
||||
|
||||
struct ContractorThreadData
|
||||
{
|
||||
ContractorHeap heap;
|
||||
ContractorDijkstra dijkstra;
|
||||
std::vector<ContractorEdge> inserted_edges;
|
||||
std::vector<NodeID> neighbours;
|
||||
explicit ContractorThreadData(NodeID nodes) : heap(nodes) {}
|
||||
explicit ContractorThreadData(NodeID nodes) : dijkstra(nodes) {}
|
||||
};
|
||||
|
||||
using NodeDepth = int;
|
||||
@ -121,7 +73,8 @@ class GraphContractor
|
||||
auto &ref = data.local(exists);
|
||||
if (!exists)
|
||||
{
|
||||
ref = std::make_shared<ContractorThreadData>(number_of_nodes);
|
||||
// ref = std::make_shared<ContractorThreadData>(number_of_nodes);
|
||||
ref = std::make_shared<ContractorThreadData>(4000);
|
||||
}
|
||||
|
||||
return ref.get();
|
||||
@ -134,478 +87,23 @@ class GraphContractor
|
||||
};
|
||||
|
||||
public:
|
||||
template <class ContainerT>
|
||||
GraphContractor(int nodes, ContainerT &input_edge_list)
|
||||
: GraphContractor(nodes, input_edge_list, {}, {})
|
||||
{
|
||||
}
|
||||
GraphContractor(int nodes, std::vector<ContractorEdge> input_edge_list);
|
||||
|
||||
template <class ContainerT>
|
||||
GraphContractor(int nodes,
|
||||
ContainerT &input_edge_list,
|
||||
std::vector<ContractorEdge> edges,
|
||||
std::vector<float> &&node_levels_,
|
||||
std::vector<EdgeWeight> &&node_weights_)
|
||||
: node_levels(std::move(node_levels_)), node_weights(std::move(node_weights_))
|
||||
{
|
||||
std::vector<ContractorEdge> edges;
|
||||
edges.reserve(input_edge_list.size() * 2);
|
||||
|
||||
const auto dend = input_edge_list.dend();
|
||||
for (auto diter = input_edge_list.dbegin(); diter != dend; ++diter)
|
||||
{
|
||||
#ifndef NDEBUG
|
||||
if (static_cast<unsigned int>(std::max(diter->weight, 1)) > 24 * 60 * 60 * 10)
|
||||
{
|
||||
util::Log(logWARNING) << "Edge weight large -> "
|
||||
<< static_cast<unsigned int>(std::max(diter->weight, 1))
|
||||
<< " : " << static_cast<unsigned int>(diter->source) << " -> "
|
||||
<< static_cast<unsigned int>(diter->target);
|
||||
}
|
||||
#endif
|
||||
edges.emplace_back(diter->source,
|
||||
diter->target,
|
||||
static_cast<unsigned int>(std::max(diter->weight, 1)),
|
||||
1,
|
||||
diter->edge_id,
|
||||
false,
|
||||
diter->forward ? true : false,
|
||||
diter->backward ? true : false);
|
||||
|
||||
edges.emplace_back(diter->target,
|
||||
diter->source,
|
||||
static_cast<unsigned int>(std::max(diter->weight, 1)),
|
||||
1,
|
||||
diter->edge_id,
|
||||
false,
|
||||
diter->backward ? true : false,
|
||||
diter->forward ? true : false);
|
||||
}
|
||||
// clear input vector
|
||||
input_edge_list.clear();
|
||||
// FIXME not sure if we need this
|
||||
edges.shrink_to_fit();
|
||||
|
||||
tbb::parallel_sort(edges.begin(), edges.end());
|
||||
NodeID edge = 0;
|
||||
for (NodeID i = 0; i < edges.size();)
|
||||
{
|
||||
const NodeID source = edges[i].source;
|
||||
const NodeID target = edges[i].target;
|
||||
const NodeID id = edges[i].data.id;
|
||||
// remove eigenloops
|
||||
if (source == target)
|
||||
{
|
||||
++i;
|
||||
continue;
|
||||
}
|
||||
ContractorEdge forward_edge;
|
||||
ContractorEdge reverse_edge;
|
||||
forward_edge.source = reverse_edge.source = source;
|
||||
forward_edge.target = reverse_edge.target = target;
|
||||
forward_edge.data.forward = reverse_edge.data.backward = true;
|
||||
forward_edge.data.backward = reverse_edge.data.forward = false;
|
||||
forward_edge.data.shortcut = reverse_edge.data.shortcut = false;
|
||||
forward_edge.data.id = reverse_edge.data.id = id;
|
||||
forward_edge.data.originalEdges = reverse_edge.data.originalEdges = 1;
|
||||
forward_edge.data.weight = reverse_edge.data.weight = INVALID_EDGE_WEIGHT;
|
||||
// remove parallel edges
|
||||
while (i < edges.size() && edges[i].source == source && edges[i].target == target)
|
||||
{
|
||||
if (edges[i].data.forward)
|
||||
{
|
||||
forward_edge.data.weight =
|
||||
std::min(edges[i].data.weight, forward_edge.data.weight);
|
||||
}
|
||||
if (edges[i].data.backward)
|
||||
{
|
||||
reverse_edge.data.weight =
|
||||
std::min(edges[i].data.weight, reverse_edge.data.weight);
|
||||
}
|
||||
++i;
|
||||
}
|
||||
// merge edges (s,t) and (t,s) into bidirectional edge
|
||||
if (forward_edge.data.weight == reverse_edge.data.weight)
|
||||
{
|
||||
if ((int)forward_edge.data.weight != INVALID_EDGE_WEIGHT)
|
||||
{
|
||||
forward_edge.data.backward = true;
|
||||
edges[edge++] = forward_edge;
|
||||
}
|
||||
}
|
||||
else
|
||||
{ // insert seperate edges
|
||||
if (((int)forward_edge.data.weight) != INVALID_EDGE_WEIGHT)
|
||||
{
|
||||
edges[edge++] = forward_edge;
|
||||
}
|
||||
if ((int)reverse_edge.data.weight != INVALID_EDGE_WEIGHT)
|
||||
{
|
||||
edges[edge++] = reverse_edge;
|
||||
}
|
||||
}
|
||||
}
|
||||
util::Log() << "merged " << edges.size() - edge << " edges out of " << edges.size();
|
||||
edges.resize(edge);
|
||||
contractor_graph = std::make_shared<ContractorGraph>(nodes, edges);
|
||||
edges.clear();
|
||||
edges.shrink_to_fit();
|
||||
|
||||
BOOST_ASSERT(0 == edges.capacity());
|
||||
util::Log() << "contractor finished initalization";
|
||||
}
|
||||
std::vector<EdgeWeight> &&node_weights_);
|
||||
|
||||
/* Flush all data from the contraction to disc and reorder stuff for better locality */
|
||||
void FlushDataAndRebuildContractorGraph(ThreadDataContainer &thread_data_list,
|
||||
std::vector<RemainingNodeData> &remaining_nodes,
|
||||
std::vector<float> &node_priorities)
|
||||
{
|
||||
util::DeallocatingVector<ContractorEdge> new_edge_set; // this one is not explicitely
|
||||
// cleared since it goes out of
|
||||
// scope anywa
|
||||
// Delete old heap data to free memory that we need for the coming operations
|
||||
thread_data_list.data.clear();
|
||||
// Create new priority array
|
||||
std::vector<float> new_node_priority(remaining_nodes.size());
|
||||
std::vector<EdgeWeight> new_node_weights(remaining_nodes.size());
|
||||
// this map gives the old IDs from the new ones, necessary to get a consistent graph
|
||||
// at the end of contraction
|
||||
orig_node_id_from_new_node_id_map.resize(remaining_nodes.size());
|
||||
// this map gives the new IDs from the old ones, necessary to remap targets from the
|
||||
// remaining graph
|
||||
const auto number_of_nodes = contractor_graph->GetNumberOfNodes();
|
||||
std::vector<NodeID> new_node_id_from_orig_id_map(number_of_nodes, SPECIAL_NODEID);
|
||||
for (const auto new_node_id : util::irange<std::size_t>(0UL, remaining_nodes.size()))
|
||||
{
|
||||
auto &node = remaining_nodes[new_node_id];
|
||||
BOOST_ASSERT(node_priorities.size() > node.id);
|
||||
new_node_priority[new_node_id] = node_priorities[node.id];
|
||||
BOOST_ASSERT(node_weights.size() > node.id);
|
||||
new_node_weights[new_node_id] = node_weights[node.id];
|
||||
}
|
||||
// build forward and backward renumbering map and remap ids in remaining_nodes
|
||||
for (const auto new_node_id : util::irange<std::size_t>(0UL, remaining_nodes.size()))
|
||||
{
|
||||
auto &node = remaining_nodes[new_node_id];
|
||||
// create renumbering maps in both directions
|
||||
orig_node_id_from_new_node_id_map[new_node_id] = node.id;
|
||||
new_node_id_from_orig_id_map[node.id] = new_node_id;
|
||||
node.id = new_node_id;
|
||||
}
|
||||
// walk over all nodes
|
||||
for (const auto source : util::irange<NodeID>(0UL, contractor_graph->GetNumberOfNodes()))
|
||||
{
|
||||
for (auto current_edge : contractor_graph->GetAdjacentEdgeRange(source))
|
||||
{
|
||||
ContractorGraph::EdgeData &data = contractor_graph->GetEdgeData(current_edge);
|
||||
const NodeID target = contractor_graph->GetTarget(current_edge);
|
||||
if (SPECIAL_NODEID == new_node_id_from_orig_id_map[source])
|
||||
{
|
||||
external_edge_list.push_back({source, target, data});
|
||||
}
|
||||
else
|
||||
{
|
||||
// node is not yet contracted.
|
||||
// add (renumbered) outgoing edges to new util::DynamicGraph.
|
||||
ContractorEdge new_edge = {new_node_id_from_orig_id_map[source],
|
||||
new_node_id_from_orig_id_map[target],
|
||||
data};
|
||||
new_edge.data.is_original_via_node_ID = true;
|
||||
BOOST_ASSERT_MSG(SPECIAL_NODEID != new_node_id_from_orig_id_map[source],
|
||||
"new source id not resolveable");
|
||||
BOOST_ASSERT_MSG(SPECIAL_NODEID != new_node_id_from_orig_id_map[target],
|
||||
"new target id not resolveable");
|
||||
new_edge_set.push_back(new_edge);
|
||||
}
|
||||
}
|
||||
}
|
||||
// Replace old priorities array by new one
|
||||
node_priorities.swap(new_node_priority);
|
||||
// Delete old node_priorities vector
|
||||
node_weights.swap(new_node_weights);
|
||||
// old Graph is removed
|
||||
contractor_graph.reset();
|
||||
// create new graph
|
||||
tbb::parallel_sort(new_edge_set.begin(), new_edge_set.end());
|
||||
contractor_graph = std::make_shared<ContractorGraph>(remaining_nodes.size(), new_edge_set);
|
||||
new_edge_set.clear();
|
||||
// INFO: MAKE SURE THIS IS THE LAST OPERATION OF THE FLUSH!
|
||||
// reinitialize heaps and ThreadData objects with appropriate size
|
||||
thread_data_list.number_of_nodes = contractor_graph->GetNumberOfNodes();
|
||||
}
|
||||
std::vector<float> &node_priorities);
|
||||
|
||||
void Run(double core_factor = 1.0)
|
||||
{
|
||||
// for the preperation we can use a big grain size, which is much faster (probably cache)
|
||||
const constexpr size_t InitGrainSize = 100000;
|
||||
const constexpr size_t PQGrainSize = 100000;
|
||||
// auto_partitioner will automatically increase the blocksize if we have
|
||||
// a lot of data. It is *important* for the last loop iterations
|
||||
// (which have a very small dataset) that it is devisible.
|
||||
const constexpr size_t IndependentGrainSize = 1;
|
||||
const constexpr size_t ContractGrainSize = 1;
|
||||
const constexpr size_t NeighboursGrainSize = 1;
|
||||
const constexpr size_t DeleteGrainSize = 1;
|
||||
void Run(double core_factor = 1.0);
|
||||
|
||||
const NodeID number_of_nodes = contractor_graph->GetNumberOfNodes();
|
||||
void GetCoreMarker(std::vector<bool> &out_is_core_node);
|
||||
|
||||
ThreadDataContainer thread_data_list(number_of_nodes);
|
||||
|
||||
NodeID number_of_contracted_nodes = 0;
|
||||
std::vector<NodeDepth> node_depth;
|
||||
std::vector<float> node_priorities;
|
||||
is_core_node.resize(number_of_nodes, false);
|
||||
|
||||
std::vector<RemainingNodeData> remaining_nodes(number_of_nodes);
|
||||
// initialize priorities in parallel
|
||||
tbb::parallel_for(tbb::blocked_range<NodeID>(0, number_of_nodes, InitGrainSize),
|
||||
[this, &remaining_nodes](const tbb::blocked_range<NodeID> &range) {
|
||||
for (auto x = range.begin(), end = range.end(); x != end; ++x)
|
||||
{
|
||||
remaining_nodes[x].id = x;
|
||||
}
|
||||
});
|
||||
|
||||
bool use_cached_node_priorities = !node_levels.empty();
|
||||
if (use_cached_node_priorities)
|
||||
{
|
||||
util::UnbufferedLog log;
|
||||
log << "using cached node priorities ...";
|
||||
node_priorities.swap(node_levels);
|
||||
log << "ok";
|
||||
}
|
||||
else
|
||||
{
|
||||
node_depth.resize(number_of_nodes, 0);
|
||||
node_priorities.resize(number_of_nodes);
|
||||
node_levels.resize(number_of_nodes);
|
||||
|
||||
util::UnbufferedLog log;
|
||||
log << "initializing elimination PQ ...";
|
||||
tbb::parallel_for(tbb::blocked_range<NodeID>(0, number_of_nodes, PQGrainSize),
|
||||
[this, &node_priorities, &node_depth, &thread_data_list](
|
||||
const tbb::blocked_range<NodeID> &range) {
|
||||
ContractorThreadData *data = thread_data_list.GetThreadData();
|
||||
for (auto x = range.begin(), end = range.end(); x != end; ++x)
|
||||
{
|
||||
node_priorities[x] =
|
||||
this->EvaluateNodePriority(data, node_depth[x], x);
|
||||
}
|
||||
});
|
||||
log << "ok";
|
||||
}
|
||||
BOOST_ASSERT(node_priorities.size() == number_of_nodes);
|
||||
|
||||
util::Log() << "preprocessing " << number_of_nodes << " nodes ...";
|
||||
|
||||
util::UnbufferedLog log;
|
||||
util::Percent p(log, number_of_nodes);
|
||||
|
||||
unsigned current_level = 0;
|
||||
bool flushed_contractor = false;
|
||||
while (number_of_nodes > 2 &&
|
||||
number_of_contracted_nodes < static_cast<NodeID>(number_of_nodes * core_factor))
|
||||
{
|
||||
if (!flushed_contractor && (number_of_contracted_nodes >
|
||||
static_cast<NodeID>(number_of_nodes * 0.65 * core_factor)))
|
||||
{
|
||||
log << " [flush " << number_of_contracted_nodes << " nodes] ";
|
||||
|
||||
FlushDataAndRebuildContractorGraph(
|
||||
thread_data_list, remaining_nodes, node_priorities);
|
||||
|
||||
flushed_contractor = true;
|
||||
}
|
||||
|
||||
tbb::parallel_for(
|
||||
tbb::blocked_range<NodeID>(0, remaining_nodes.size(), IndependentGrainSize),
|
||||
[this, &node_priorities, &remaining_nodes, &thread_data_list](
|
||||
const tbb::blocked_range<NodeID> &range) {
|
||||
ContractorThreadData *data = thread_data_list.GetThreadData();
|
||||
// determine independent node set
|
||||
for (auto i = range.begin(), end = range.end(); i != end; ++i)
|
||||
{
|
||||
const NodeID node = remaining_nodes[i].id;
|
||||
remaining_nodes[i].is_independent =
|
||||
this->IsNodeIndependent(node_priorities, data, node);
|
||||
}
|
||||
});
|
||||
|
||||
// sort all remaining nodes to the beginning of the sequence
|
||||
const auto begin_independent_nodes = stable_partition(
|
||||
remaining_nodes.begin(), remaining_nodes.end(), [](RemainingNodeData node_data) {
|
||||
return !node_data.is_independent;
|
||||
});
|
||||
auto begin_independent_nodes_idx =
|
||||
std::distance(remaining_nodes.begin(), begin_independent_nodes);
|
||||
auto end_independent_nodes_idx = remaining_nodes.size();
|
||||
|
||||
if (!use_cached_node_priorities)
|
||||
{
|
||||
// write out contraction level
|
||||
tbb::parallel_for(
|
||||
tbb::blocked_range<NodeID>(
|
||||
begin_independent_nodes_idx, end_independent_nodes_idx, ContractGrainSize),
|
||||
[this, remaining_nodes, flushed_contractor, current_level](
|
||||
const tbb::blocked_range<NodeID> &range) {
|
||||
if (flushed_contractor)
|
||||
{
|
||||
for (auto position = range.begin(), end = range.end(); position != end;
|
||||
++position)
|
||||
{
|
||||
const NodeID x = remaining_nodes[position].id;
|
||||
node_levels[orig_node_id_from_new_node_id_map[x]] = current_level;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
for (auto position = range.begin(), end = range.end(); position != end;
|
||||
++position)
|
||||
{
|
||||
const NodeID x = remaining_nodes[position].id;
|
||||
node_levels[x] = current_level;
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// contract independent nodes
|
||||
tbb::parallel_for(
|
||||
tbb::blocked_range<NodeID>(
|
||||
begin_independent_nodes_idx, end_independent_nodes_idx, ContractGrainSize),
|
||||
[this, &remaining_nodes, &thread_data_list](
|
||||
const tbb::blocked_range<NodeID> &range) {
|
||||
ContractorThreadData *data = thread_data_list.GetThreadData();
|
||||
for (auto position = range.begin(), end = range.end(); position != end;
|
||||
++position)
|
||||
{
|
||||
const NodeID x = remaining_nodes[position].id;
|
||||
this->ContractNode<false>(data, x);
|
||||
}
|
||||
});
|
||||
|
||||
tbb::parallel_for(
|
||||
tbb::blocked_range<NodeID>(
|
||||
begin_independent_nodes_idx, end_independent_nodes_idx, DeleteGrainSize),
|
||||
[this, &remaining_nodes, &thread_data_list](
|
||||
const tbb::blocked_range<NodeID> &range) {
|
||||
ContractorThreadData *data = thread_data_list.GetThreadData();
|
||||
for (auto position = range.begin(), end = range.end(); position != end;
|
||||
++position)
|
||||
{
|
||||
const NodeID x = remaining_nodes[position].id;
|
||||
this->DeleteIncomingEdges(data, x);
|
||||
}
|
||||
});
|
||||
|
||||
// make sure we really sort each block
|
||||
tbb::parallel_for(
|
||||
thread_data_list.data.range(),
|
||||
[&](const ThreadDataContainer::EnumerableThreadData::range_type &range) {
|
||||
for (auto &data : range)
|
||||
tbb::parallel_sort(data->inserted_edges.begin(),
|
||||
data->inserted_edges.end());
|
||||
});
|
||||
|
||||
// insert new edges
|
||||
for (auto &data : thread_data_list.data)
|
||||
{
|
||||
for (const ContractorEdge &edge : data->inserted_edges)
|
||||
{
|
||||
const EdgeID current_edge_ID =
|
||||
contractor_graph->FindEdge(edge.source, edge.target);
|
||||
if (current_edge_ID < contractor_graph->EndEdges(edge.source))
|
||||
{
|
||||
ContractorGraph::EdgeData ¤t_data =
|
||||
contractor_graph->GetEdgeData(current_edge_ID);
|
||||
if (current_data.shortcut && edge.data.forward == current_data.forward &&
|
||||
edge.data.backward == current_data.backward &&
|
||||
edge.data.weight < current_data.weight)
|
||||
{
|
||||
// found a duplicate edge with smaller weight, update it.
|
||||
current_data = edge.data;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
contractor_graph->InsertEdge(edge.source, edge.target, edge.data);
|
||||
}
|
||||
data->inserted_edges.clear();
|
||||
}
|
||||
|
||||
if (!use_cached_node_priorities)
|
||||
{
|
||||
tbb::parallel_for(
|
||||
tbb::blocked_range<NodeID>(begin_independent_nodes_idx,
|
||||
end_independent_nodes_idx,
|
||||
NeighboursGrainSize),
|
||||
[this, &node_priorities, &remaining_nodes, &node_depth, &thread_data_list](
|
||||
const tbb::blocked_range<NodeID> &range) {
|
||||
ContractorThreadData *data = thread_data_list.GetThreadData();
|
||||
for (auto position = range.begin(), end = range.end(); position != end;
|
||||
++position)
|
||||
{
|
||||
NodeID x = remaining_nodes[position].id;
|
||||
this->UpdateNodeNeighbours(node_priorities, node_depth, data, x);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// remove contracted nodes from the pool
|
||||
number_of_contracted_nodes += end_independent_nodes_idx - begin_independent_nodes_idx;
|
||||
remaining_nodes.resize(begin_independent_nodes_idx);
|
||||
|
||||
p.PrintStatus(number_of_contracted_nodes);
|
||||
++current_level;
|
||||
}
|
||||
|
||||
if (remaining_nodes.size() > 2)
|
||||
{
|
||||
if (orig_node_id_from_new_node_id_map.size() > 0)
|
||||
{
|
||||
tbb::parallel_for(
|
||||
tbb::blocked_range<NodeID>(0, remaining_nodes.size(), InitGrainSize),
|
||||
[this, &remaining_nodes](const tbb::blocked_range<NodeID> &range) {
|
||||
for (auto x = range.begin(), end = range.end(); x != end; ++x)
|
||||
{
|
||||
const auto orig_id = remaining_nodes[x].id;
|
||||
is_core_node[orig_node_id_from_new_node_id_map[orig_id]] = true;
|
||||
}
|
||||
});
|
||||
}
|
||||
else
|
||||
{
|
||||
tbb::parallel_for(
|
||||
tbb::blocked_range<NodeID>(0, remaining_nodes.size(), InitGrainSize),
|
||||
[this, &remaining_nodes](const tbb::blocked_range<NodeID> &range) {
|
||||
for (auto x = range.begin(), end = range.end(); x != end; ++x)
|
||||
{
|
||||
const auto orig_id = remaining_nodes[x].id;
|
||||
is_core_node[orig_id] = true;
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// in this case we don't need core markers since we fully contracted
|
||||
// the graph
|
||||
is_core_node.clear();
|
||||
}
|
||||
|
||||
util::Log() << "[core] " << remaining_nodes.size() << " nodes "
|
||||
<< contractor_graph->GetNumberOfEdges() << " edges.";
|
||||
|
||||
thread_data_list.data.clear();
|
||||
}
|
||||
|
||||
inline void GetCoreMarker(std::vector<bool> &out_is_core_node)
|
||||
{
|
||||
out_is_core_node.swap(is_core_node);
|
||||
}
|
||||
|
||||
inline void GetNodeLevels(std::vector<float> &out_node_levels)
|
||||
{
|
||||
out_node_levels.swap(node_levels);
|
||||
}
|
||||
void GetNodeLevels(std::vector<float> &out_node_levels);
|
||||
|
||||
template <class Edge> inline void GetEdges(util::DeallocatingVector<Edge> &edges)
|
||||
{
|
||||
@ -665,109 +163,15 @@ class GraphContractor
|
||||
}
|
||||
|
||||
private:
|
||||
inline void RelaxNode(const NodeID node,
|
||||
const NodeID forbidden_node,
|
||||
const int weight,
|
||||
ContractorHeap &heap)
|
||||
{
|
||||
const short current_hop = heap.GetData(node).hop + 1;
|
||||
for (auto edge : contractor_graph->GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const ContractorEdgeData &data = contractor_graph->GetEdgeData(edge);
|
||||
if (!data.forward)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
const NodeID to = contractor_graph->GetTarget(edge);
|
||||
if (forbidden_node == to)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
const int to_weight = weight + data.weight;
|
||||
|
||||
// New Node discovered -> Add to Heap + Node Info Storage
|
||||
if (!heap.WasInserted(to))
|
||||
{
|
||||
heap.Insert(to, to_weight, ContractorHeapData{current_hop, false});
|
||||
}
|
||||
// Found a shorter Path -> Update weight
|
||||
else if (to_weight < heap.GetKey(to))
|
||||
{
|
||||
heap.DecreaseKey(to, to_weight);
|
||||
heap.GetData(to).hop = current_hop;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
inline void Dijkstra(const int max_weight,
|
||||
const unsigned number_of_targets,
|
||||
const int max_nodes,
|
||||
ContractorThreadData &data,
|
||||
const NodeID middle_node)
|
||||
{
|
||||
|
||||
ContractorHeap &heap = data.heap;
|
||||
|
||||
int nodes = 0;
|
||||
unsigned number_of_targets_found = 0;
|
||||
while (!heap.Empty())
|
||||
{
|
||||
const NodeID node = heap.DeleteMin();
|
||||
const auto weight = heap.GetKey(node);
|
||||
if (++nodes > max_nodes)
|
||||
{
|
||||
return;
|
||||
}
|
||||
if (weight > max_weight)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
// Destination settled?
|
||||
if (heap.GetData(node).target)
|
||||
{
|
||||
++number_of_targets_found;
|
||||
if (number_of_targets_found >= number_of_targets)
|
||||
{
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
RelaxNode(node, middle_node, weight, heap);
|
||||
}
|
||||
}
|
||||
|
||||
inline float EvaluateNodePriority(ContractorThreadData *const data,
|
||||
const NodeDepth node_depth,
|
||||
const NodeID node)
|
||||
{
|
||||
ContractionStats stats;
|
||||
|
||||
// perform simulated contraction
|
||||
ContractNode<true>(data, node, &stats);
|
||||
|
||||
// Result will contain the priority
|
||||
float result;
|
||||
if (0 == (stats.edges_deleted_count * stats.original_edges_deleted_count))
|
||||
{
|
||||
result = 1.f * node_depth;
|
||||
}
|
||||
else
|
||||
{
|
||||
result = 2.f * (((float)stats.edges_added_count) / stats.edges_deleted_count) +
|
||||
4.f * (((float)stats.original_edges_added_count) /
|
||||
stats.original_edges_deleted_count) +
|
||||
1.f * node_depth;
|
||||
}
|
||||
BOOST_ASSERT(result >= 0);
|
||||
return result;
|
||||
}
|
||||
float EvaluateNodePriority(ContractorThreadData *const data,
|
||||
const NodeDepth node_depth,
|
||||
const NodeID node);
|
||||
|
||||
template <bool RUNSIMULATION>
|
||||
inline bool
|
||||
bool
|
||||
ContractNode(ContractorThreadData *data, const NodeID node, ContractionStats *stats = nullptr)
|
||||
{
|
||||
ContractorHeap &heap = data->heap;
|
||||
auto &dijkstra = data->dijkstra;
|
||||
std::size_t inserted_edges_size = data->inserted_edges.size();
|
||||
std::vector<ContractorEdge> &inserted_edges = data->inserted_edges;
|
||||
const constexpr bool SHORTCUT_ARC = true;
|
||||
@ -794,8 +198,8 @@ class GraphContractor
|
||||
continue;
|
||||
}
|
||||
|
||||
heap.Clear();
|
||||
heap.Insert(source, 0, ContractorHeapData{});
|
||||
dijkstra.Clear();
|
||||
dijkstra.Insert(source, 0, ContractorHeapData{});
|
||||
int max_weight = 0;
|
||||
unsigned number_of_targets = 0;
|
||||
|
||||
@ -858,9 +262,9 @@ class GraphContractor
|
||||
continue;
|
||||
}
|
||||
max_weight = std::max(max_weight, path_weight);
|
||||
if (!heap.WasInserted(target))
|
||||
if (!dijkstra.WasInserted(target))
|
||||
{
|
||||
heap.Insert(target, INVALID_EDGE_WEIGHT, ContractorHeapData{0, true});
|
||||
dijkstra.Insert(target, INVALID_EDGE_WEIGHT, ContractorHeapData{0, true});
|
||||
++number_of_targets;
|
||||
}
|
||||
}
|
||||
@ -868,12 +272,17 @@ class GraphContractor
|
||||
if (RUNSIMULATION)
|
||||
{
|
||||
const int constexpr SIMULATION_SEARCH_SPACE_SIZE = 1000;
|
||||
Dijkstra(max_weight, number_of_targets, SIMULATION_SEARCH_SPACE_SIZE, *data, node);
|
||||
dijkstra.Run(number_of_targets,
|
||||
SIMULATION_SEARCH_SPACE_SIZE,
|
||||
max_weight,
|
||||
node,
|
||||
*contractor_graph);
|
||||
}
|
||||
else
|
||||
{
|
||||
const int constexpr FULL_SEARCH_SPACE_SIZE = 2000;
|
||||
Dijkstra(max_weight, number_of_targets, FULL_SEARCH_SPACE_SIZE, *data, node);
|
||||
dijkstra.Run(
|
||||
number_of_targets, FULL_SEARCH_SPACE_SIZE, max_weight, node, *contractor_graph);
|
||||
}
|
||||
for (auto out_edge : contractor_graph->GetAdjacentEdgeRange(node))
|
||||
{
|
||||
@ -886,7 +295,7 @@ class GraphContractor
|
||||
if (target == node)
|
||||
continue;
|
||||
const int path_weight = in_data.weight + out_data.weight;
|
||||
const int weight = heap.GetKey(target);
|
||||
const int weight = dijkstra.GetKey(target);
|
||||
if (path_weight < weight)
|
||||
{
|
||||
if (RUNSIMULATION)
|
||||
@ -960,138 +369,19 @@ class GraphContractor
|
||||
return true;
|
||||
}
|
||||
|
||||
inline void DeleteIncomingEdges(ContractorThreadData *data, const NodeID node)
|
||||
{
|
||||
std::vector<NodeID> &neighbours = data->neighbours;
|
||||
neighbours.clear();
|
||||
void DeleteIncomingEdges(ContractorThreadData *data, const NodeID node);
|
||||
|
||||
// find all neighbours
|
||||
for (auto e : contractor_graph->GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const NodeID u = contractor_graph->GetTarget(e);
|
||||
if (u != node)
|
||||
{
|
||||
neighbours.push_back(u);
|
||||
}
|
||||
}
|
||||
// eliminate duplicate entries ( forward + backward edges )
|
||||
std::sort(neighbours.begin(), neighbours.end());
|
||||
neighbours.resize(std::unique(neighbours.begin(), neighbours.end()) - neighbours.begin());
|
||||
bool UpdateNodeNeighbours(std::vector<float> &priorities,
|
||||
std::vector<NodeDepth> &node_depth,
|
||||
ContractorThreadData *const data,
|
||||
const NodeID node);
|
||||
|
||||
for (const auto i : util::irange<std::size_t>(0, neighbours.size()))
|
||||
{
|
||||
contractor_graph->DeleteEdgesTo(neighbours[i], node);
|
||||
}
|
||||
}
|
||||
|
||||
inline bool UpdateNodeNeighbours(std::vector<float> &priorities,
|
||||
std::vector<NodeDepth> &node_depth,
|
||||
ContractorThreadData *const data,
|
||||
const NodeID node)
|
||||
{
|
||||
std::vector<NodeID> &neighbours = data->neighbours;
|
||||
neighbours.clear();
|
||||
|
||||
// find all neighbours
|
||||
for (auto e : contractor_graph->GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const NodeID u = contractor_graph->GetTarget(e);
|
||||
if (u == node)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
neighbours.push_back(u);
|
||||
node_depth[u] = std::max(node_depth[node] + 1, node_depth[u]);
|
||||
}
|
||||
// eliminate duplicate entries ( forward + backward edges )
|
||||
std::sort(neighbours.begin(), neighbours.end());
|
||||
neighbours.resize(std::unique(neighbours.begin(), neighbours.end()) - neighbours.begin());
|
||||
|
||||
// re-evaluate priorities of neighboring nodes
|
||||
for (const NodeID u : neighbours)
|
||||
{
|
||||
priorities[u] = EvaluateNodePriority(data, node_depth[u], u);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
inline bool IsNodeIndependent(const std::vector<float> &priorities,
|
||||
ContractorThreadData *const data,
|
||||
NodeID node) const
|
||||
{
|
||||
const float priority = priorities[node];
|
||||
|
||||
std::vector<NodeID> &neighbours = data->neighbours;
|
||||
neighbours.clear();
|
||||
|
||||
for (auto e : contractor_graph->GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const NodeID target = contractor_graph->GetTarget(e);
|
||||
if (node == target)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
const float target_priority = priorities[target];
|
||||
BOOST_ASSERT(target_priority >= 0);
|
||||
// found a neighbour with lower priority?
|
||||
if (priority > target_priority)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// tie breaking
|
||||
if (std::abs(priority - target_priority) < std::numeric_limits<float>::epsilon() &&
|
||||
Bias(node, target))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
neighbours.push_back(target);
|
||||
}
|
||||
|
||||
std::sort(neighbours.begin(), neighbours.end());
|
||||
neighbours.resize(std::unique(neighbours.begin(), neighbours.end()) - neighbours.begin());
|
||||
|
||||
// examine all neighbours that are at most 2 hops away
|
||||
for (const NodeID u : neighbours)
|
||||
{
|
||||
for (auto e : contractor_graph->GetAdjacentEdgeRange(u))
|
||||
{
|
||||
const NodeID target = contractor_graph->GetTarget(e);
|
||||
if (node == target)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
const float target_priority = priorities[target];
|
||||
BOOST_ASSERT(target_priority >= 0);
|
||||
// found a neighbour with lower priority?
|
||||
if (priority > target_priority)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// tie breaking
|
||||
if (std::abs(priority - target_priority) < std::numeric_limits<float>::epsilon() &&
|
||||
Bias(node, target))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
bool IsNodeIndependent(const std::vector<float> &priorities,
|
||||
ContractorThreadData *const data,
|
||||
NodeID node) const;
|
||||
|
||||
// This bias function takes up 22 assembly instructions in total on X86
|
||||
inline bool Bias(const NodeID a, const NodeID b) const
|
||||
{
|
||||
const unsigned short hasha = fast_hash(a);
|
||||
const unsigned short hashb = fast_hash(b);
|
||||
|
||||
// The compiler optimizes that to conditional register flags but without branching
|
||||
// statements!
|
||||
if (hasha != hashb)
|
||||
{
|
||||
return hasha < hashb;
|
||||
}
|
||||
return a < b;
|
||||
}
|
||||
bool Bias(const NodeID a, const NodeID b) const;
|
||||
|
||||
std::shared_ptr<ContractorGraph> contractor_graph;
|
||||
stxxl::vector<QueryEdge> external_edge_list;
|
||||
@ -1107,7 +397,8 @@ class GraphContractor
|
||||
std::vector<bool> is_core_node;
|
||||
util::XORFastHash<> fast_hash;
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
#endif // CONTRACTOR_HPP
|
||||
} // namespace contractor
|
||||
} // namespace osrm
|
||||
|
||||
#endif // OSRM_CONTRACTOR_GRAPH_CONTRACTOR_HPP
|
||||
|
59
include/contractor/graph_contractor_adaptors.hpp
Normal file
59
include/contractor/graph_contractor_adaptors.hpp
Normal file
@ -0,0 +1,59 @@
|
||||
#ifndef OSRM_CONTRACTOR_GRAPH_CONTRACTION_ADAPTORS_HPP_
|
||||
#define OSRM_CONTRACTOR_GRAPH_CONTRACTION_ADAPTORS_HPP_
|
||||
|
||||
#include "contractor/contractor_graph.hpp"
|
||||
#include "util/log.hpp"
|
||||
|
||||
#include <vector>
|
||||
|
||||
namespace osrm
|
||||
{
|
||||
namespace contractor
|
||||
{
|
||||
|
||||
// Make sure to move in the input edge list!
|
||||
template <typename InputEdgeContainer>
|
||||
std::vector<ContractorEdge> adaptToContractorInput(InputEdgeContainer input_edge_list)
|
||||
{
|
||||
std::vector<ContractorEdge> edges;
|
||||
edges.reserve(input_edge_list.size() * 2);
|
||||
|
||||
for (const auto &input_edge : input_edge_list)
|
||||
{
|
||||
#ifndef NDEBUG
|
||||
const unsigned int constexpr DAY_IN_DECI_SECONDS = 24 * 60 * 60 * 10;
|
||||
if (static_cast<unsigned int>(std::max(input_edge.weight, 1)) > DAY_IN_DECI_SECONDS)
|
||||
{
|
||||
util::Log(logWARNING) << "Edge weight large -> "
|
||||
<< static_cast<unsigned int>(std::max(input_edge.weight, 1))
|
||||
<< " : " << static_cast<unsigned int>(input_edge.source) << " -> "
|
||||
<< static_cast<unsigned int>(input_edge.target);
|
||||
}
|
||||
#endif
|
||||
edges.emplace_back(input_edge.source,
|
||||
input_edge.target,
|
||||
static_cast<unsigned int>(std::max(input_edge.weight, 1)),
|
||||
1,
|
||||
input_edge.edge_id,
|
||||
false,
|
||||
input_edge.forward ? true : false,
|
||||
input_edge.backward ? true : false);
|
||||
|
||||
edges.emplace_back(input_edge.target,
|
||||
input_edge.source,
|
||||
static_cast<unsigned int>(std::max(input_edge.weight, 1)),
|
||||
1,
|
||||
input_edge.edge_id,
|
||||
false,
|
||||
input_edge.backward ? true : false,
|
||||
input_edge.forward ? true : false);
|
||||
}
|
||||
// FIXME not sure if we need this
|
||||
edges.shrink_to_fit();
|
||||
return edges;
|
||||
}
|
||||
|
||||
} // namespace contractor
|
||||
} // namespace osrm
|
||||
|
||||
#endif // OSRM_CONTRACTOR_GRAPH_CONTRACTION_ADAPTORS_HPP_
|
@ -4,6 +4,7 @@
|
||||
#include <boost/assert.hpp>
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstddef>
|
||||
#include <limits>
|
||||
#include <map>
|
||||
#include <type_traits>
|
||||
@ -18,7 +19,7 @@ namespace util
|
||||
template <typename NodeID, typename Key> class ArrayStorage
|
||||
{
|
||||
public:
|
||||
explicit ArrayStorage(size_t size) : positions(size, 0) {}
|
||||
explicit ArrayStorage(std::size_t size) : positions(size, 0) {}
|
||||
|
||||
~ArrayStorage() {}
|
||||
|
||||
@ -35,7 +36,7 @@ template <typename NodeID, typename Key> class ArrayStorage
|
||||
template <typename NodeID, typename Key> class MapStorage
|
||||
{
|
||||
public:
|
||||
explicit MapStorage(size_t) {}
|
||||
explicit MapStorage(std::size_t) {}
|
||||
|
||||
Key &operator[](NodeID node) { return nodes[node]; }
|
||||
|
||||
@ -58,7 +59,7 @@ template <typename NodeID, typename Key> class MapStorage
|
||||
template <typename NodeID, typename Key> class UnorderedMapStorage
|
||||
{
|
||||
public:
|
||||
explicit UnorderedMapStorage(size_t) { nodes.rehash(1000); }
|
||||
explicit UnorderedMapStorage(std::size_t) { nodes.rehash(1000); }
|
||||
|
||||
Key &operator[](const NodeID node) { return nodes[node]; }
|
||||
|
||||
@ -99,7 +100,7 @@ class BinaryHeap
|
||||
using WeightType = Weight;
|
||||
using DataType = Data;
|
||||
|
||||
explicit BinaryHeap(size_t maxID) : node_index(maxID) { Clear(); }
|
||||
explicit BinaryHeap(std::size_t maxID) : node_index(maxID) { Clear(); }
|
||||
|
||||
void Clear()
|
||||
{
|
||||
|
@ -1,6 +1,7 @@
|
||||
#include "contractor/contractor.hpp"
|
||||
#include "contractor/crc32_processor.hpp"
|
||||
#include "contractor/graph_contractor.hpp"
|
||||
#include "contractor/graph_contractor_adaptors.hpp"
|
||||
|
||||
#include "extractor/compressed_edge_container.hpp"
|
||||
#include "extractor/edge_based_graph_factory.hpp"
|
||||
@ -307,7 +308,7 @@ int Contractor::Run()
|
||||
|
||||
util::Log() << "Loading edge-expanded graph representation";
|
||||
|
||||
util::DeallocatingVector<extractor::EdgeBasedEdge> edge_based_edge_list;
|
||||
std::vector<extractor::EdgeBasedEdge> edge_based_edge_list;
|
||||
|
||||
EdgeID max_edge_id = LoadEdgeExpandedGraph(config.edge_based_graph_path,
|
||||
edge_based_edge_list,
|
||||
@ -334,12 +335,16 @@ int Contractor::Run()
|
||||
}
|
||||
|
||||
util::DeallocatingVector<QueryEdge> contracted_edge_list;
|
||||
ContractGraph(max_edge_id,
|
||||
edge_based_edge_list,
|
||||
contracted_edge_list,
|
||||
std::move(node_weights),
|
||||
is_core_node,
|
||||
node_levels);
|
||||
{ // own scope to not keep the contractor around
|
||||
GraphContractor graph_contractor(max_edge_id + 1,
|
||||
adaptToContractorInput(std::move(edge_based_edge_list)),
|
||||
std::move(node_levels),
|
||||
std::move(node_weights));
|
||||
graph_contractor.Run(config.core_factor);
|
||||
graph_contractor.GetEdges(contracted_edge_list);
|
||||
graph_contractor.GetCoreMarker(is_core_node);
|
||||
graph_contractor.GetNodeLevels(node_levels);
|
||||
}
|
||||
TIMER_STOP(contraction);
|
||||
|
||||
util::Log() << "Contraction took " << TIMER_SEC(contraction) << " sec";
|
||||
@ -367,21 +372,20 @@ int Contractor::Run()
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Utilities for LoadEdgeExpandedGraph to restore my sanity
|
||||
EdgeID Contractor::LoadEdgeExpandedGraph(
|
||||
std::string const &edge_based_graph_filename,
|
||||
util::DeallocatingVector<extractor::EdgeBasedEdge> &edge_based_edge_list,
|
||||
std::vector<EdgeWeight> &node_weights,
|
||||
const std::string &edge_segment_lookup_filename,
|
||||
const std::string &edge_penalty_filename,
|
||||
const std::vector<std::string> &segment_speed_filenames,
|
||||
const std::vector<std::string> &turn_penalty_filenames,
|
||||
const std::string &nodes_filename,
|
||||
const std::string &geometry_filename,
|
||||
const std::string &datasource_names_filename,
|
||||
const std::string &datasource_indexes_filename,
|
||||
const std::string &rtree_leaf_filename,
|
||||
const double log_edge_updates_factor)
|
||||
EdgeID
|
||||
Contractor::LoadEdgeExpandedGraph(std::string const &edge_based_graph_filename,
|
||||
std::vector<extractor::EdgeBasedEdge> &edge_based_edge_list,
|
||||
std::vector<EdgeWeight> &node_weights,
|
||||
const std::string &edge_segment_lookup_filename,
|
||||
const std::string &edge_penalty_filename,
|
||||
const std::vector<std::string> &segment_speed_filenames,
|
||||
const std::vector<std::string> &turn_penalty_filenames,
|
||||
const std::string &nodes_filename,
|
||||
const std::string &geometry_filename,
|
||||
const std::string &datasource_names_filename,
|
||||
const std::string &datasource_indexes_filename,
|
||||
const std::string &rtree_leaf_filename,
|
||||
const double log_edge_updates_factor)
|
||||
{
|
||||
if (segment_speed_filenames.size() > 255 || turn_penalty_filenames.size() > 255)
|
||||
throw util::exception("Limit of 255 segment speed and turn penalty files each reached" +
|
||||
@ -957,26 +961,5 @@ Contractor::WriteContractedGraph(unsigned max_node_id,
|
||||
return number_of_used_edges;
|
||||
}
|
||||
|
||||
/**
|
||||
\brief Build contracted graph.
|
||||
*/
|
||||
void Contractor::ContractGraph(
|
||||
const EdgeID max_edge_id,
|
||||
util::DeallocatingVector<extractor::EdgeBasedEdge> &edge_based_edge_list,
|
||||
util::DeallocatingVector<QueryEdge> &contracted_edge_list,
|
||||
std::vector<EdgeWeight> &&node_weights,
|
||||
std::vector<bool> &is_core_node,
|
||||
std::vector<float> &inout_node_levels) const
|
||||
{
|
||||
std::vector<float> node_levels;
|
||||
node_levels.swap(inout_node_levels);
|
||||
|
||||
GraphContractor graph_contractor(
|
||||
max_edge_id + 1, edge_based_edge_list, std::move(node_levels), std::move(node_weights));
|
||||
graph_contractor.Run(config.core_factor);
|
||||
graph_contractor.GetEdges(contracted_edge_list);
|
||||
graph_contractor.GetCoreMarker(is_core_node);
|
||||
graph_contractor.GetNodeLevels(inout_node_levels);
|
||||
}
|
||||
}
|
||||
}
|
||||
} // namespace contractor
|
||||
} // namespace osrm
|
||||
|
96
src/contractor/contractor_dijkstra.cpp
Normal file
96
src/contractor/contractor_dijkstra.cpp
Normal file
@ -0,0 +1,96 @@
|
||||
#include "contractor/contractor_dijkstra.hpp"
|
||||
|
||||
namespace osrm
|
||||
{
|
||||
namespace contractor
|
||||
{
|
||||
|
||||
ContractorDijkstra::ContractorDijkstra(const std::size_t heap_size) : heap(heap_size) {}
|
||||
|
||||
void ContractorDijkstra::Run(const unsigned number_of_targets,
|
||||
const int node_limit,
|
||||
const int weight_limit,
|
||||
const NodeID forbidden_node,
|
||||
const ContractorGraph &graph)
|
||||
{
|
||||
int nodes = 0;
|
||||
unsigned number_of_targets_found = 0;
|
||||
while (!heap.Empty())
|
||||
{
|
||||
const NodeID node = heap.DeleteMin();
|
||||
const auto node_weight = heap.GetKey(node);
|
||||
if (++nodes > node_limit)
|
||||
{
|
||||
return;
|
||||
}
|
||||
if (node_weight > weight_limit)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
// Destination settled?
|
||||
if (heap.GetData(node).target)
|
||||
{
|
||||
++number_of_targets_found;
|
||||
if (number_of_targets_found >= number_of_targets)
|
||||
{
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
RelaxNode(node, node_weight, forbidden_node, graph);
|
||||
}
|
||||
}
|
||||
|
||||
void ContractorDijkstra::RelaxNode(const NodeID node,
|
||||
const int node_weight,
|
||||
const NodeID forbidden_node,
|
||||
const ContractorGraph &graph)
|
||||
{
|
||||
const short current_hop = heap.GetData(node).hop + 1;
|
||||
for (auto edge : graph.GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const ContractorEdgeData &data = graph.GetEdgeData(edge);
|
||||
if (!data.forward)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
const NodeID to = graph.GetTarget(edge);
|
||||
if (forbidden_node == to)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
const int to_weight = node_weight + data.weight;
|
||||
|
||||
// New Node discovered -> Add to Heap + Node Info Storage
|
||||
if (!heap.WasInserted(to))
|
||||
{
|
||||
heap.Insert(to, to_weight, ContractorHeapData{current_hop, false});
|
||||
}
|
||||
// Found a shorter Path -> Update weight
|
||||
else if (to_weight < GetKey(to))
|
||||
{
|
||||
heap.DecreaseKey(to, to_weight);
|
||||
heap.GetData(to).hop = current_hop;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void ContractorDijkstra::Clear() { heap.Clear(); }
|
||||
|
||||
bool ContractorDijkstra::WasInserted(const NodeID node) const { return heap.WasInserted(node); }
|
||||
|
||||
void ContractorDijkstra::Insert(const NodeID node,
|
||||
const ContractorHeap::WeightType weight,
|
||||
const ContractorHeap::DataType &data)
|
||||
{
|
||||
heap.Insert(node, weight, data);
|
||||
}
|
||||
|
||||
ContractorHeap::WeightType ContractorDijkstra::GetKey(const NodeID node)
|
||||
{
|
||||
return heap.GetKey(node);
|
||||
}
|
||||
|
||||
} // namespace contractor
|
||||
} // namespace osrm
|
592
src/contractor/graph_contractor.cpp
Normal file
592
src/contractor/graph_contractor.cpp
Normal file
@ -0,0 +1,592 @@
|
||||
#include "contractor/graph_contractor.hpp"
|
||||
|
||||
namespace osrm
|
||||
{
|
||||
namespace contractor
|
||||
{
|
||||
|
||||
GraphContractor::GraphContractor(int nodes, std::vector<ContractorEdge> input_edge_list)
|
||||
: GraphContractor(nodes, std::move(input_edge_list), {}, {})
|
||||
{
|
||||
}
|
||||
|
||||
GraphContractor::GraphContractor(int nodes,
|
||||
std::vector<ContractorEdge> edges,
|
||||
std::vector<float> &&node_levels_,
|
||||
std::vector<EdgeWeight> &&node_weights_)
|
||||
: node_levels(std::move(node_levels_)), node_weights(std::move(node_weights_))
|
||||
{
|
||||
tbb::parallel_sort(edges.begin(), edges.end());
|
||||
NodeID edge = 0;
|
||||
for (NodeID i = 0; i < edges.size();)
|
||||
{
|
||||
const NodeID source = edges[i].source;
|
||||
const NodeID target = edges[i].target;
|
||||
const NodeID id = edges[i].data.id;
|
||||
// remove eigenloops
|
||||
if (source == target)
|
||||
{
|
||||
++i;
|
||||
continue;
|
||||
}
|
||||
ContractorEdge forward_edge;
|
||||
ContractorEdge reverse_edge;
|
||||
forward_edge.source = reverse_edge.source = source;
|
||||
forward_edge.target = reverse_edge.target = target;
|
||||
forward_edge.data.forward = reverse_edge.data.backward = true;
|
||||
forward_edge.data.backward = reverse_edge.data.forward = false;
|
||||
forward_edge.data.shortcut = reverse_edge.data.shortcut = false;
|
||||
forward_edge.data.id = reverse_edge.data.id = id;
|
||||
forward_edge.data.originalEdges = reverse_edge.data.originalEdges = 1;
|
||||
forward_edge.data.weight = reverse_edge.data.weight = INVALID_EDGE_WEIGHT;
|
||||
// remove parallel edges
|
||||
while (i < edges.size() && edges[i].source == source && edges[i].target == target)
|
||||
{
|
||||
if (edges[i].data.forward)
|
||||
{
|
||||
forward_edge.data.weight = std::min(edges[i].data.weight, forward_edge.data.weight);
|
||||
}
|
||||
if (edges[i].data.backward)
|
||||
{
|
||||
reverse_edge.data.weight = std::min(edges[i].data.weight, reverse_edge.data.weight);
|
||||
}
|
||||
++i;
|
||||
}
|
||||
// merge edges (s,t) and (t,s) into bidirectional edge
|
||||
if (forward_edge.data.weight == reverse_edge.data.weight)
|
||||
{
|
||||
if ((int)forward_edge.data.weight != INVALID_EDGE_WEIGHT)
|
||||
{
|
||||
forward_edge.data.backward = true;
|
||||
edges[edge++] = forward_edge;
|
||||
}
|
||||
}
|
||||
else
|
||||
{ // insert seperate edges
|
||||
if (((int)forward_edge.data.weight) != INVALID_EDGE_WEIGHT)
|
||||
{
|
||||
edges[edge++] = forward_edge;
|
||||
}
|
||||
if ((int)reverse_edge.data.weight != INVALID_EDGE_WEIGHT)
|
||||
{
|
||||
edges[edge++] = reverse_edge;
|
||||
}
|
||||
}
|
||||
}
|
||||
util::Log() << "merged " << edges.size() - edge << " edges out of " << edges.size();
|
||||
edges.resize(edge);
|
||||
contractor_graph = std::make_shared<ContractorGraph>(nodes, edges);
|
||||
edges.clear();
|
||||
edges.shrink_to_fit();
|
||||
|
||||
BOOST_ASSERT(0 == edges.capacity());
|
||||
util::Log() << "contractor finished initalization";
|
||||
}
|
||||
|
||||
/* Flush all data from the contraction to disc and reorder stuff for better locality */
|
||||
void GraphContractor::FlushDataAndRebuildContractorGraph(
|
||||
ThreadDataContainer &thread_data_list,
|
||||
std::vector<RemainingNodeData> &remaining_nodes,
|
||||
std::vector<float> &node_priorities)
|
||||
{
|
||||
util::DeallocatingVector<ContractorEdge> new_edge_set; // this one is not explicitely
|
||||
// cleared since it goes out of
|
||||
// scope anywa
|
||||
// Delete old heap data to free memory that we need for the coming operations
|
||||
thread_data_list.data.clear();
|
||||
// Create new priority array
|
||||
std::vector<float> new_node_priority(remaining_nodes.size());
|
||||
std::vector<EdgeWeight> new_node_weights(remaining_nodes.size());
|
||||
// this map gives the old IDs from the new ones, necessary to get a consistent graph
|
||||
// at the end of contraction
|
||||
orig_node_id_from_new_node_id_map.resize(remaining_nodes.size());
|
||||
// this map gives the new IDs from the old ones, necessary to remap targets from the
|
||||
// remaining graph
|
||||
const auto number_of_nodes = contractor_graph->GetNumberOfNodes();
|
||||
std::vector<NodeID> new_node_id_from_orig_id_map(number_of_nodes, SPECIAL_NODEID);
|
||||
for (const auto new_node_id : util::irange<std::size_t>(0UL, remaining_nodes.size()))
|
||||
{
|
||||
auto &node = remaining_nodes[new_node_id];
|
||||
BOOST_ASSERT(node_priorities.size() > node.id);
|
||||
new_node_priority[new_node_id] = node_priorities[node.id];
|
||||
BOOST_ASSERT(node_weights.size() > node.id);
|
||||
new_node_weights[new_node_id] = node_weights[node.id];
|
||||
}
|
||||
// build forward and backward renumbering map and remap ids in remaining_nodes
|
||||
for (const auto new_node_id : util::irange<std::size_t>(0UL, remaining_nodes.size()))
|
||||
{
|
||||
auto &node = remaining_nodes[new_node_id];
|
||||
// create renumbering maps in both directions
|
||||
orig_node_id_from_new_node_id_map[new_node_id] = node.id;
|
||||
new_node_id_from_orig_id_map[node.id] = new_node_id;
|
||||
node.id = new_node_id;
|
||||
}
|
||||
// walk over all nodes
|
||||
for (const auto source : util::irange<NodeID>(0UL, contractor_graph->GetNumberOfNodes()))
|
||||
{
|
||||
for (auto current_edge : contractor_graph->GetAdjacentEdgeRange(source))
|
||||
{
|
||||
ContractorGraph::EdgeData &data = contractor_graph->GetEdgeData(current_edge);
|
||||
const NodeID target = contractor_graph->GetTarget(current_edge);
|
||||
if (SPECIAL_NODEID == new_node_id_from_orig_id_map[source])
|
||||
{
|
||||
external_edge_list.push_back({source, target, data});
|
||||
}
|
||||
else
|
||||
{
|
||||
// node is not yet contracted.
|
||||
// add (renumbered) outgoing edges to new util::DynamicGraph.
|
||||
ContractorEdge new_edge = {new_node_id_from_orig_id_map[source],
|
||||
new_node_id_from_orig_id_map[target],
|
||||
data};
|
||||
new_edge.data.is_original_via_node_ID = true;
|
||||
BOOST_ASSERT_MSG(SPECIAL_NODEID != new_node_id_from_orig_id_map[source],
|
||||
"new source id not resolveable");
|
||||
BOOST_ASSERT_MSG(SPECIAL_NODEID != new_node_id_from_orig_id_map[target],
|
||||
"new target id not resolveable");
|
||||
new_edge_set.push_back(new_edge);
|
||||
}
|
||||
}
|
||||
}
|
||||
// Replace old priorities array by new one
|
||||
node_priorities.swap(new_node_priority);
|
||||
// Delete old node_priorities vector
|
||||
node_weights.swap(new_node_weights);
|
||||
// old Graph is removed
|
||||
contractor_graph.reset();
|
||||
// create new graph
|
||||
tbb::parallel_sort(new_edge_set.begin(), new_edge_set.end());
|
||||
contractor_graph = std::make_shared<ContractorGraph>(remaining_nodes.size(), new_edge_set);
|
||||
new_edge_set.clear();
|
||||
// INFO: MAKE SURE THIS IS THE LAST OPERATION OF THE FLUSH!
|
||||
// reinitialize heaps and ThreadData objects with appropriate size
|
||||
thread_data_list.number_of_nodes = contractor_graph->GetNumberOfNodes();
|
||||
}
|
||||
|
||||
void GraphContractor::Run(double core_factor)
|
||||
{
|
||||
// for the preperation we can use a big grain size, which is much faster (probably cache)
|
||||
const constexpr size_t InitGrainSize = 100000;
|
||||
const constexpr size_t PQGrainSize = 100000;
|
||||
// auto_partitioner will automatically increase the blocksize if we have
|
||||
// a lot of data. It is *important* for the last loop iterations
|
||||
// (which have a very small dataset) that it is devisible.
|
||||
const constexpr size_t IndependentGrainSize = 1;
|
||||
const constexpr size_t ContractGrainSize = 1;
|
||||
const constexpr size_t NeighboursGrainSize = 1;
|
||||
const constexpr size_t DeleteGrainSize = 1;
|
||||
|
||||
const NodeID number_of_nodes = contractor_graph->GetNumberOfNodes();
|
||||
|
||||
ThreadDataContainer thread_data_list(number_of_nodes);
|
||||
|
||||
NodeID number_of_contracted_nodes = 0;
|
||||
std::vector<NodeDepth> node_depth;
|
||||
std::vector<float> node_priorities;
|
||||
is_core_node.resize(number_of_nodes, false);
|
||||
|
||||
std::vector<RemainingNodeData> remaining_nodes(number_of_nodes);
|
||||
// initialize priorities in parallel
|
||||
tbb::parallel_for(tbb::blocked_range<NodeID>(0, number_of_nodes, InitGrainSize),
|
||||
[this, &remaining_nodes](const tbb::blocked_range<NodeID> &range) {
|
||||
for (auto x = range.begin(), end = range.end(); x != end; ++x)
|
||||
{
|
||||
remaining_nodes[x].id = x;
|
||||
}
|
||||
});
|
||||
|
||||
bool use_cached_node_priorities = !node_levels.empty();
|
||||
if (use_cached_node_priorities)
|
||||
{
|
||||
util::UnbufferedLog log;
|
||||
log << "using cached node priorities ...";
|
||||
node_priorities.swap(node_levels);
|
||||
log << "ok";
|
||||
}
|
||||
else
|
||||
{
|
||||
node_depth.resize(number_of_nodes, 0);
|
||||
node_priorities.resize(number_of_nodes);
|
||||
node_levels.resize(number_of_nodes);
|
||||
|
||||
util::UnbufferedLog log;
|
||||
log << "initializing elimination PQ ...";
|
||||
tbb::parallel_for(tbb::blocked_range<NodeID>(0, number_of_nodes, PQGrainSize),
|
||||
[this, &node_priorities, &node_depth, &thread_data_list](
|
||||
const tbb::blocked_range<NodeID> &range) {
|
||||
ContractorThreadData *data = thread_data_list.GetThreadData();
|
||||
for (auto x = range.begin(), end = range.end(); x != end; ++x)
|
||||
{
|
||||
node_priorities[x] =
|
||||
this->EvaluateNodePriority(data, node_depth[x], x);
|
||||
}
|
||||
});
|
||||
log << "ok";
|
||||
}
|
||||
BOOST_ASSERT(node_priorities.size() == number_of_nodes);
|
||||
|
||||
util::Log() << "preprocessing " << number_of_nodes << " nodes ...";
|
||||
|
||||
util::UnbufferedLog log;
|
||||
util::Percent p(log, number_of_nodes);
|
||||
|
||||
unsigned current_level = 0;
|
||||
bool flushed_contractor = false;
|
||||
while (number_of_nodes > 2 &&
|
||||
number_of_contracted_nodes < static_cast<NodeID>(number_of_nodes * core_factor))
|
||||
{
|
||||
if (!flushed_contractor && (number_of_contracted_nodes >
|
||||
static_cast<NodeID>(number_of_nodes * 0.65 * core_factor)))
|
||||
{
|
||||
log << " [flush " << number_of_contracted_nodes << " nodes] ";
|
||||
|
||||
FlushDataAndRebuildContractorGraph(thread_data_list, remaining_nodes, node_priorities);
|
||||
|
||||
flushed_contractor = true;
|
||||
}
|
||||
|
||||
tbb::parallel_for(
|
||||
tbb::blocked_range<NodeID>(0, remaining_nodes.size(), IndependentGrainSize),
|
||||
[this, &node_priorities, &remaining_nodes, &thread_data_list](
|
||||
const tbb::blocked_range<NodeID> &range) {
|
||||
ContractorThreadData *data = thread_data_list.GetThreadData();
|
||||
// determine independent node set
|
||||
for (auto i = range.begin(), end = range.end(); i != end; ++i)
|
||||
{
|
||||
const NodeID node = remaining_nodes[i].id;
|
||||
remaining_nodes[i].is_independent =
|
||||
this->IsNodeIndependent(node_priorities, data, node);
|
||||
}
|
||||
});
|
||||
|
||||
// sort all remaining nodes to the beginning of the sequence
|
||||
const auto begin_independent_nodes =
|
||||
stable_partition(remaining_nodes.begin(),
|
||||
remaining_nodes.end(),
|
||||
[](RemainingNodeData node_data) { return !node_data.is_independent; });
|
||||
auto begin_independent_nodes_idx =
|
||||
std::distance(remaining_nodes.begin(), begin_independent_nodes);
|
||||
auto end_independent_nodes_idx = remaining_nodes.size();
|
||||
|
||||
if (!use_cached_node_priorities)
|
||||
{
|
||||
// write out contraction level
|
||||
tbb::parallel_for(
|
||||
tbb::blocked_range<NodeID>(
|
||||
begin_independent_nodes_idx, end_independent_nodes_idx, ContractGrainSize),
|
||||
[this, remaining_nodes, flushed_contractor, current_level](
|
||||
const tbb::blocked_range<NodeID> &range) {
|
||||
if (flushed_contractor)
|
||||
{
|
||||
for (auto position = range.begin(), end = range.end(); position != end;
|
||||
++position)
|
||||
{
|
||||
const NodeID x = remaining_nodes[position].id;
|
||||
node_levels[orig_node_id_from_new_node_id_map[x]] = current_level;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
for (auto position = range.begin(), end = range.end(); position != end;
|
||||
++position)
|
||||
{
|
||||
const NodeID x = remaining_nodes[position].id;
|
||||
node_levels[x] = current_level;
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// contract independent nodes
|
||||
tbb::parallel_for(
|
||||
tbb::blocked_range<NodeID>(
|
||||
begin_independent_nodes_idx, end_independent_nodes_idx, ContractGrainSize),
|
||||
[this, &remaining_nodes, &thread_data_list](const tbb::blocked_range<NodeID> &range) {
|
||||
ContractorThreadData *data = thread_data_list.GetThreadData();
|
||||
for (auto position = range.begin(), end = range.end(); position != end; ++position)
|
||||
{
|
||||
const NodeID x = remaining_nodes[position].id;
|
||||
this->ContractNode<false>(data, x);
|
||||
}
|
||||
});
|
||||
|
||||
tbb::parallel_for(
|
||||
tbb::blocked_range<NodeID>(
|
||||
begin_independent_nodes_idx, end_independent_nodes_idx, DeleteGrainSize),
|
||||
[this, &remaining_nodes, &thread_data_list](const tbb::blocked_range<NodeID> &range) {
|
||||
ContractorThreadData *data = thread_data_list.GetThreadData();
|
||||
for (auto position = range.begin(), end = range.end(); position != end; ++position)
|
||||
{
|
||||
const NodeID x = remaining_nodes[position].id;
|
||||
this->DeleteIncomingEdges(data, x);
|
||||
}
|
||||
});
|
||||
|
||||
// make sure we really sort each block
|
||||
tbb::parallel_for(thread_data_list.data.range(),
|
||||
[&](const ThreadDataContainer::EnumerableThreadData::range_type &range) {
|
||||
for (auto &data : range)
|
||||
tbb::parallel_sort(data->inserted_edges.begin(),
|
||||
data->inserted_edges.end());
|
||||
});
|
||||
|
||||
// insert new edges
|
||||
for (auto &data : thread_data_list.data)
|
||||
{
|
||||
for (const ContractorEdge &edge : data->inserted_edges)
|
||||
{
|
||||
const EdgeID current_edge_ID = contractor_graph->FindEdge(edge.source, edge.target);
|
||||
if (current_edge_ID < contractor_graph->EndEdges(edge.source))
|
||||
{
|
||||
ContractorGraph::EdgeData ¤t_data =
|
||||
contractor_graph->GetEdgeData(current_edge_ID);
|
||||
if (current_data.shortcut && edge.data.forward == current_data.forward &&
|
||||
edge.data.backward == current_data.backward &&
|
||||
edge.data.weight < current_data.weight)
|
||||
{
|
||||
// found a duplicate edge with smaller weight, update it.
|
||||
current_data = edge.data;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
contractor_graph->InsertEdge(edge.source, edge.target, edge.data);
|
||||
}
|
||||
data->inserted_edges.clear();
|
||||
}
|
||||
|
||||
if (!use_cached_node_priorities)
|
||||
{
|
||||
tbb::parallel_for(
|
||||
tbb::blocked_range<NodeID>(
|
||||
begin_independent_nodes_idx, end_independent_nodes_idx, NeighboursGrainSize),
|
||||
[this, &node_priorities, &remaining_nodes, &node_depth, &thread_data_list](
|
||||
const tbb::blocked_range<NodeID> &range) {
|
||||
ContractorThreadData *data = thread_data_list.GetThreadData();
|
||||
for (auto position = range.begin(), end = range.end(); position != end;
|
||||
++position)
|
||||
{
|
||||
NodeID x = remaining_nodes[position].id;
|
||||
this->UpdateNodeNeighbours(node_priorities, node_depth, data, x);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// remove contracted nodes from the pool
|
||||
BOOST_ASSERT(end_independent_nodes_idx - begin_independent_nodes_idx > 0);
|
||||
number_of_contracted_nodes += end_independent_nodes_idx - begin_independent_nodes_idx;
|
||||
remaining_nodes.resize(begin_independent_nodes_idx);
|
||||
|
||||
p.PrintStatus(number_of_contracted_nodes);
|
||||
++current_level;
|
||||
}
|
||||
|
||||
if (remaining_nodes.size() > 2)
|
||||
{
|
||||
if (orig_node_id_from_new_node_id_map.size() > 0)
|
||||
{
|
||||
tbb::parallel_for(tbb::blocked_range<NodeID>(0, remaining_nodes.size(), InitGrainSize),
|
||||
[this, &remaining_nodes](const tbb::blocked_range<NodeID> &range) {
|
||||
for (auto x = range.begin(), end = range.end(); x != end; ++x)
|
||||
{
|
||||
const auto orig_id = remaining_nodes[x].id;
|
||||
is_core_node[orig_node_id_from_new_node_id_map[orig_id]] =
|
||||
true;
|
||||
}
|
||||
});
|
||||
}
|
||||
else
|
||||
{
|
||||
tbb::parallel_for(tbb::blocked_range<NodeID>(0, remaining_nodes.size(), InitGrainSize),
|
||||
[this, &remaining_nodes](const tbb::blocked_range<NodeID> &range) {
|
||||
for (auto x = range.begin(), end = range.end(); x != end; ++x)
|
||||
{
|
||||
const auto orig_id = remaining_nodes[x].id;
|
||||
is_core_node[orig_id] = true;
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// in this case we don't need core markers since we fully contracted
|
||||
// the graph
|
||||
is_core_node.clear();
|
||||
}
|
||||
|
||||
util::Log() << "[core] " << remaining_nodes.size() << " nodes "
|
||||
<< contractor_graph->GetNumberOfEdges() << " edges.";
|
||||
|
||||
thread_data_list.data.clear();
|
||||
}
|
||||
|
||||
void GraphContractor::GetCoreMarker(std::vector<bool> &out_is_core_node)
|
||||
{
|
||||
out_is_core_node.swap(is_core_node);
|
||||
}
|
||||
|
||||
void GraphContractor::GetNodeLevels(std::vector<float> &out_node_levels)
|
||||
{
|
||||
out_node_levels.swap(node_levels);
|
||||
}
|
||||
|
||||
/// Estimates how attractive it is to contract `node` next.  A simulated
/// contraction is performed (ContractNode<true> collects statistics without
/// mutating the graph) and the statistics are folded into a single priority:
/// lower values are contracted earlier.  The weights (2x edge quotient,
/// 4x original-edge quotient, 1x depth) follow the usual CH node-ordering
/// heuristic of balancing shortcut growth against hierarchy depth.
///
/// \param data        per-thread scratch data used by the simulated contraction
/// \param node_depth  current search-space depth of `node`
/// \param node        the node whose priority is evaluated
/// \return non-negative priority value (asserted >= 0)
float GraphContractor::EvaluateNodePriority(ContractorThreadData *const data,
                                            const NodeDepth node_depth,
                                            const NodeID node)
{
    ContractionStats stats;

    // perform simulated contraction
    ContractNode<true>(data, node, &stats);

    // Result will contain the priority
    float result;
    if (0 == (stats.edges_deleted_count * stats.original_edges_deleted_count))
    {
        // Nothing would be deleted: fall back to depth alone to avoid a
        // division by zero in the quotient terms below.
        result = 1.f * node_depth;
    }
    else
    {
        result =
            2.f * (static_cast<float>(stats.edges_added_count) / stats.edges_deleted_count) +
            4.f * (static_cast<float>(stats.original_edges_added_count) /
                   stats.original_edges_deleted_count) +
            1.f * node_depth;
    }
    BOOST_ASSERT(result >= 0);
    return result;
}
|
||||
|
||||
/// Removes, from every neighbour of `node`, the edges that point back at
/// `node`.  This runs after `node` has been contracted (shortcuts already
/// inserted), so those incoming edges are no longer needed by later searches.
///
/// \param data  per-thread scratch data; its `neighbours` buffer is reused
/// \param node  the freshly contracted node
void GraphContractor::DeleteIncomingEdges(ContractorThreadData *data, const NodeID node)
{
    std::vector<NodeID> &neighbours = data->neighbours;
    neighbours.clear();

    // find all neighbours
    for (auto e : contractor_graph->GetAdjacentEdgeRange(node))
    {
        const NodeID u = contractor_graph->GetTarget(e);
        if (u != node)
        {
            neighbours.push_back(u);
        }
    }
    // eliminate duplicate entries ( forward + backward edges )
    std::sort(neighbours.begin(), neighbours.end());
    neighbours.erase(std::unique(neighbours.begin(), neighbours.end()), neighbours.end());

    for (const auto neighbour : neighbours)
    {
        contractor_graph->DeleteEdgesTo(neighbour, node);
    }
}
|
||||
|
||||
bool GraphContractor::UpdateNodeNeighbours(std::vector<float> &priorities,
|
||||
std::vector<NodeDepth> &node_depth,
|
||||
ContractorThreadData *const data,
|
||||
const NodeID node)
|
||||
{
|
||||
std::vector<NodeID> &neighbours = data->neighbours;
|
||||
neighbours.clear();
|
||||
|
||||
// find all neighbours
|
||||
for (auto e : contractor_graph->GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const NodeID u = contractor_graph->GetTarget(e);
|
||||
if (u == node)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
neighbours.push_back(u);
|
||||
node_depth[u] = std::max(node_depth[node] + 1, node_depth[u]);
|
||||
}
|
||||
// eliminate duplicate entries ( forward + backward edges )
|
||||
std::sort(neighbours.begin(), neighbours.end());
|
||||
neighbours.resize(std::unique(neighbours.begin(), neighbours.end()) - neighbours.begin());
|
||||
|
||||
// re-evaluate priorities of neighboring nodes
|
||||
for (const NodeID u : neighbours)
|
||||
{
|
||||
priorities[u] = EvaluateNodePriority(data, node_depth[u], u);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool GraphContractor::IsNodeIndependent(const std::vector<float> &priorities,
|
||||
ContractorThreadData *const data,
|
||||
NodeID node) const
|
||||
{
|
||||
const float priority = priorities[node];
|
||||
|
||||
std::vector<NodeID> &neighbours = data->neighbours;
|
||||
neighbours.clear();
|
||||
|
||||
for (auto e : contractor_graph->GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const NodeID target = contractor_graph->GetTarget(e);
|
||||
if (node == target)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
const float target_priority = priorities[target];
|
||||
BOOST_ASSERT(target_priority >= 0);
|
||||
// found a neighbour with lower priority?
|
||||
if (priority > target_priority)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// tie breaking
|
||||
if (std::abs(priority - target_priority) < std::numeric_limits<float>::epsilon() &&
|
||||
Bias(node, target))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
neighbours.push_back(target);
|
||||
}
|
||||
|
||||
std::sort(neighbours.begin(), neighbours.end());
|
||||
neighbours.resize(std::unique(neighbours.begin(), neighbours.end()) - neighbours.begin());
|
||||
|
||||
// examine all neighbours that are at most 2 hops away
|
||||
for (const NodeID u : neighbours)
|
||||
{
|
||||
for (auto e : contractor_graph->GetAdjacentEdgeRange(u))
|
||||
{
|
||||
const NodeID target = contractor_graph->GetTarget(e);
|
||||
if (node == target)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
const float target_priority = priorities[target];
|
||||
BOOST_ASSERT(target_priority >= 0);
|
||||
// found a neighbour with lower priority?
|
||||
if (priority > target_priority)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// tie breaking
|
||||
if (std::abs(priority - target_priority) < std::numeric_limits<float>::epsilon() &&
|
||||
Bias(node, target))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// This bias function takes up 22 assembly instructions in total on X86
|
||||
bool GraphContractor::Bias(const NodeID a, const NodeID b) const
|
||||
{
|
||||
const unsigned short hasha = fast_hash(a);
|
||||
const unsigned short hashb = fast_hash(b);
|
||||
|
||||
// The compiler optimizes that to conditional register flags but without branching
|
||||
// statements!
|
||||
if (hasha != hashb)
|
||||
{
|
||||
return hasha < hashb;
|
||||
}
|
||||
return a < b;
|
||||
}
|
||||
|
||||
} // namespace contractor
|
||||
} // namespace osrm
|
Loading…
Reference in New Issue
Block a user