Renumber nodes after running osrm-partition
The new numbering uses the partition information to sort border nodes first, which compacts the storages that need to be indexed by border node ID. We also get improved cache performance for free, since we can additionally sort the nodes recursively by cell ID. This implements issue #3779.
This commit is contained in:
parent: a195d7dfd3
commit: 0266c9d969
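
For context, the ordering described in the commit message can be sketched in isolation. This is a minimal, hypothetical illustration only: the function name sketchPermutation and the flattened parameter shapes are made up for the example; the real implementation is makePermutation in src/partition/renumber.cpp further down in this diff.

#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

// partitions: one vector per level (finest to coarsest), each mapping node ID -> cell ID
// border_level: per node, the highest level on which it is a border node (0 = never a border node)
std::vector<std::uint32_t>
sketchPermutation(const std::vector<std::vector<std::uint32_t>> &partitions,
                  const std::vector<std::uint32_t> &border_level)
{
    std::vector<std::uint32_t> ordering(border_level.size());
    std::iota(ordering.begin(), ordering.end(), 0);

    // Recursive sort by cell ID: the stable sort on a coarser level keeps the
    // relative order already established on the finer levels below it.
    for (const auto &partition : partitions)
        std::stable_sort(ordering.begin(), ordering.end(),
                         [&](auto lhs, auto rhs) { return partition[lhs] < partition[rhs]; });

    // Border nodes of higher levels get the smallest new IDs; the stable sort
    // keeps the per-cell ordering within each border level intact.
    std::stable_sort(ordering.begin(), ordering.end(),
                     [&](auto lhs, auto rhs) { return border_level[lhs] > border_level[rhs]; });

    // ordering maps new ID -> old ID; invert it to get the old -> new permutation.
    std::vector<std::uint32_t> permutation(ordering.size());
    for (std::uint32_t new_id = 0; new_id < ordering.size(); ++new_id)
        permutation[ordering[new_id]] = new_id;
    return permutation;
}

Because both passes are stable, storages indexed by border node ID (such as the cell storage) address a small contiguous prefix of the ID range, and nodes of the same cell stay adjacent, which is where the cache benefit comes from.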
@ -6,6 +6,8 @@
|
||||
Note: the curb side depends on `ProfileProperties::left_hand_driving`; it is a global property set once by the profile. If you are working with a planet dataset, the API will be wrong in some countries and right in others.
|
||||
- NodeJs Bindings
|
||||
- new parameter `approaches` for `route`, `table`, `trip` and `nearest` requests.
|
||||
- Tools
|
||||
- `osrm-partition` now ensures it is called before `osrm-contract` and removes inconsistent .hsgr files automatically.
|
||||
- Features
|
||||
- Added conditional restriction support with `parse-conditional-restrictions=true|false` to osrm-extract. This option saves conditional turn restrictions to the .restrictions file for parsing by contract later. Added `parse-conditionals-from-now=utc time stamp` and `--time-zone-file=/path/to/file` to osrm-contract
|
||||
- Files
|
||||
|
@ -243,8 +243,8 @@ module.exports = function () {
|
||||
processedCacheFile: this.processedCacheFile, environment: this.environment};
|
||||
let queue = d3.queue(1);
|
||||
queue.defer(this.extractData.bind(this), p);
|
||||
queue.defer(this.contractData.bind(this), p);
|
||||
queue.defer(this.partitionData.bind(this), p);
|
||||
queue.defer(this.contractData.bind(this), p);
|
||||
queue.defer(this.customizeData.bind(this), p);
|
||||
queue.awaitAll(callback);
|
||||
};
|
||||
|
@ -13,23 +13,6 @@ namespace extractor
|
||||
struct EdgeBasedEdge
|
||||
{
|
||||
public:
|
||||
EdgeBasedEdge();
|
||||
|
||||
template <class EdgeT> explicit EdgeBasedEdge(const EdgeT &other);
|
||||
|
||||
EdgeBasedEdge(const NodeID source,
|
||||
const NodeID target,
|
||||
const NodeID edge_id,
|
||||
const EdgeWeight weight,
|
||||
const EdgeWeight duration,
|
||||
const bool forward,
|
||||
const bool backward);
|
||||
|
||||
bool operator<(const EdgeBasedEdge &other) const;
|
||||
|
||||
NodeID source;
|
||||
NodeID target;
|
||||
|
||||
struct EdgeData
|
||||
{
|
||||
EdgeData() : turn_id(0), weight(0), duration(0), forward(false), backward(false) {}
|
||||
@ -51,7 +34,24 @@ struct EdgeBasedEdge
|
||||
std::uint32_t backward : 1;
|
||||
|
||||
auto is_unidirectional() const { return !forward || !backward; }
|
||||
} data;
|
||||
};
|
||||
|
||||
EdgeBasedEdge();
|
||||
template <class EdgeT> explicit EdgeBasedEdge(const EdgeT &other);
|
||||
EdgeBasedEdge(const NodeID source,
|
||||
const NodeID target,
|
||||
const NodeID edge_id,
|
||||
const EdgeWeight weight,
|
||||
const EdgeWeight duration,
|
||||
const bool forward,
|
||||
const bool backward);
|
||||
EdgeBasedEdge(const NodeID source, const NodeID target, const EdgeBasedEdge::EdgeData &data);
|
||||
|
||||
bool operator<(const EdgeBasedEdge &other) const;
|
||||
|
||||
NodeID source;
|
||||
NodeID target;
|
||||
EdgeData data;
|
||||
};
|
||||
static_assert(sizeof(extractor::EdgeBasedEdge) == 20,
|
||||
"Size of extractor::EdgeBasedEdge type is "
|
||||
@ -73,6 +73,13 @@ inline EdgeBasedEdge::EdgeBasedEdge(const NodeID source,
|
||||
{
|
||||
}
|
||||
|
||||
inline EdgeBasedEdge::EdgeBasedEdge(const NodeID source,
|
||||
const NodeID target,
|
||||
const EdgeBasedEdge::EdgeData &data)
|
||||
: source(source), target(target), data{data}
|
||||
{
|
||||
}
|
||||
|
||||
inline bool EdgeBasedEdge::operator<(const EdgeBasedEdge &other) const
|
||||
{
|
||||
const auto unidirectional = data.is_unidirectional();
|
||||
|
@ -86,10 +86,6 @@ class Extractor
|
||||
std::vector<util::Coordinate> &coordinates,
|
||||
extractor::PackedOSMIDs &osm_node_ids);
|
||||
|
||||
void WriteEdgeBasedGraph(const std::string &output_file_filename,
|
||||
const EdgeID max_edge_id,
|
||||
util::DeallocatingVector<EdgeBasedEdge> const &edge_based_edge_list);
|
||||
|
||||
void WriteIntersectionClassificationData(
|
||||
const std::string &output_file_name,
|
||||
const std::vector<std::uint32_t> &node_based_intersection_classes,
|
||||
|
@ -1,6 +1,7 @@
|
||||
#ifndef OSRM_EXTRACTOR_FILES_HPP
|
||||
#define OSRM_EXTRACTOR_FILES_HPP
|
||||
|
||||
#include "extractor/edge_based_edge.hpp"
|
||||
#include "extractor/guidance/turn_lane_types.hpp"
|
||||
#include "extractor/node_data_container.hpp"
|
||||
#include "extractor/serialization.hpp"
|
||||
@ -19,6 +20,32 @@ namespace extractor
|
||||
namespace files
|
||||
{
|
||||
|
||||
template <typename EdgeBasedEdgeVector>
|
||||
void writeEdgeBasedGraph(const boost::filesystem::path &path,
|
||||
EdgeID const max_edge_id,
|
||||
const EdgeBasedEdgeVector &edge_based_edge_list)
|
||||
{
|
||||
static_assert(std::is_same<typename EdgeBasedEdgeVector::value_type, EdgeBasedEdge>::value, "");
|
||||
|
||||
storage::io::FileWriter writer(path, storage::io::FileWriter::GenerateFingerprint);
|
||||
|
||||
writer.WriteElementCount64(max_edge_id);
|
||||
storage::serialization::write(writer, edge_based_edge_list);
|
||||
}
|
||||
|
||||
template <typename EdgeBasedEdgeVector>
|
||||
void readEdgeBasedGraph(const boost::filesystem::path &path,
|
||||
EdgeID &max_edge_id,
|
||||
EdgeBasedEdgeVector &edge_based_edge_list)
|
||||
{
|
||||
static_assert(std::is_same<typename EdgeBasedEdgeVector::value_type, EdgeBasedEdge>::value, "");
|
||||
|
||||
storage::io::FileReader reader(path, storage::io::FileReader::VerifyFingerprint);
|
||||
|
||||
max_edge_id = reader.ReadElementCount64();
|
||||
storage::serialization::read(reader, edge_based_edge_list);
|
||||
}
|
||||
|
||||
// reads .osrm.nodes
|
||||
template <typename CoordinatesT, typename PackedOSMIDsT>
|
||||
inline void readNodes(const boost::filesystem::path &path,
|
||||
|
@ -1,9 +1,12 @@
|
||||
#ifndef OSRM_EXTRACTOR_NODE_DATA_CONTAINER_HPP
|
||||
#define OSRM_EXTRACTOR_NODE_DATA_CONTAINER_HPP
|
||||
|
||||
#include "extractor/travel_mode.hpp"
|
||||
|
||||
#include "storage/io_fwd.hpp"
|
||||
#include "storage/shared_memory_ownership.hpp"
|
||||
|
||||
#include "util/permutation.hpp"
|
||||
#include "util/typedefs.hpp"
|
||||
#include "util/vector_view.hpp"
|
||||
|
||||
@ -81,6 +84,15 @@ template <storage::Ownership Ownership> class EdgeBasedNodeDataContainerImpl
|
||||
serialization::write<Ownership>(storage::io::FileWriter &writer,
|
||||
const EdgeBasedNodeDataContainerImpl &ebn_data_container);
|
||||
|
||||
template <typename = std::enable_if<Ownership == storage::Ownership::Container>>
|
||||
void Renumber(const std::vector<std::uint32_t> &permutation)
|
||||
{
|
||||
util::inplacePermutation(geometry_ids.begin(), geometry_ids.end(), permutation);
|
||||
util::inplacePermutation(name_ids.begin(), name_ids.end(), permutation);
|
||||
util::inplacePermutation(component_ids.begin(), component_ids.end(), permutation);
|
||||
util::inplacePermutation(travel_modes.begin(), travel_modes.end(), permutation);
|
||||
}
|
||||
|
||||
private:
|
||||
Vector<GeometryID> geometry_ids;
|
||||
Vector<NameID> name_ids;
|
||||
|
@ -21,9 +21,15 @@ namespace partition
|
||||
|
||||
struct EdgeBasedGraphEdgeData : extractor::EdgeBasedEdge::EdgeData
|
||||
{
|
||||
// We need to write out the full edge based graph again.
|
||||
using Base = extractor::EdgeBasedEdge::EdgeData;
|
||||
using Base::Base;
|
||||
|
||||
// TODO: in case we want to modify the graph we need to store a boundary_arc flag here
|
||||
EdgeBasedGraphEdgeData(const EdgeBasedGraphEdgeData &) = default;
|
||||
EdgeBasedGraphEdgeData(EdgeBasedGraphEdgeData &&) = default;
|
||||
EdgeBasedGraphEdgeData &operator=(const EdgeBasedGraphEdgeData &) = default;
|
||||
EdgeBasedGraphEdgeData &operator=(EdgeBasedGraphEdgeData &&) = default;
|
||||
EdgeBasedGraphEdgeData(const Base &base) : Base(base) {}
|
||||
EdgeBasedGraphEdgeData() : Base() {}
|
||||
};
|
||||
|
||||
struct DynamicEdgeBasedGraph : util::DynamicGraph<EdgeBasedGraphEdgeData>
|
||||
|
@ -4,11 +4,15 @@
|
||||
#include "partition/edge_based_graph.hpp"
|
||||
|
||||
#include "extractor/edge_based_edge.hpp"
|
||||
#include "extractor/files.hpp"
|
||||
#include "storage/io.hpp"
|
||||
#include "util/coordinate.hpp"
|
||||
#include "util/dynamic_graph.hpp"
|
||||
#include "util/typedefs.hpp"
|
||||
|
||||
#include <tbb/parallel_reduce.h>
|
||||
#include <tbb/parallel_sort.h>
|
||||
|
||||
#include <cstdint>
|
||||
|
||||
#include <algorithm>
|
||||
@ -56,124 +60,137 @@ splitBidirectionalEdges(const std::vector<extractor::EdgeBasedEdge> &edges)
|
||||
template <typename OutputEdgeT>
|
||||
std::vector<OutputEdgeT> prepareEdgesForUsageInGraph(std::vector<extractor::EdgeBasedEdge> edges)
|
||||
{
|
||||
std::sort(begin(edges), end(edges));
|
||||
// sort into blocks of edges with same source + target
|
||||
// then we partition by the forward flag to sort all edges with a forward direction first.
|
||||
// then we sort by weight to ensure the first forward edge is the smallest forward edge
|
||||
std::sort(begin(edges), end(edges), [](const auto &lhs, const auto &rhs) {
|
||||
return std::tie(lhs.source, lhs.target, rhs.data.forward, lhs.data.weight) <
|
||||
std::tie(rhs.source, rhs.target, lhs.data.forward, rhs.data.weight);
|
||||
});
|
||||
|
||||
std::vector<OutputEdgeT> graph_edges;
|
||||
graph_edges.reserve(edges.size());
|
||||
std::vector<OutputEdgeT> output_edges;
|
||||
output_edges.reserve(edges.size());
|
||||
|
||||
for (NodeID i = 0; i < edges.size();)
|
||||
for (auto begin_interval = edges.begin(); begin_interval != edges.end();)
|
||||
{
|
||||
const NodeID source = edges[i].source;
|
||||
const NodeID target = edges[i].target;
|
||||
const NodeID source = begin_interval->source;
|
||||
const NodeID target = begin_interval->target;
|
||||
|
||||
auto end_interval =
|
||||
std::find_if_not(begin_interval, edges.end(), [source, target](const auto &edge) {
|
||||
return std::tie(edge.source, edge.target) == std::tie(source, target);
|
||||
});
|
||||
BOOST_ASSERT(begin_interval != end_interval);
|
||||
|
||||
// remove self-loops
|
||||
if (source == target)
|
||||
{
|
||||
++i;
|
||||
begin_interval = end_interval;
|
||||
continue;
|
||||
}
|
||||
|
||||
OutputEdgeT forward_edge;
|
||||
OutputEdgeT reverse_edge;
|
||||
forward_edge.source = reverse_edge.source = source;
|
||||
forward_edge.target = reverse_edge.target = target;
|
||||
forward_edge.data.turn_id = reverse_edge.data.turn_id = edges[i].data.turn_id;
|
||||
forward_edge.data.weight = reverse_edge.data.weight = INVALID_EDGE_WEIGHT;
|
||||
forward_edge.data.duration = reverse_edge.data.duration = MAXIMAL_EDGE_DURATION_INT_30;
|
||||
forward_edge.data.forward = reverse_edge.data.backward = true;
|
||||
forward_edge.data.backward = reverse_edge.data.forward = false;
|
||||
BOOST_ASSERT_MSG(begin_interval->data.forward != begin_interval->data.backward,
|
||||
"The forward and backward flag need to be mutally exclusive");
|
||||
|
||||
// remove parallel edges
|
||||
while (i < edges.size() && edges[i].source == source && edges[i].target == target)
|
||||
// find smallest backward edge and check if we can merge
|
||||
auto first_backward = std::find_if(
|
||||
begin_interval, end_interval, [](const auto &edge) { return edge.data.backward; });
|
||||
|
||||
// thanks to the sorting we know this is the smallest backward edge
|
||||
// and there is no forward edge
|
||||
if (begin_interval == first_backward)
|
||||
{
|
||||
if (edges[i].data.forward)
|
||||
{
|
||||
forward_edge.data.weight = std::min(edges[i].data.weight, forward_edge.data.weight);
|
||||
forward_edge.data.duration =
|
||||
std::min(edges[i].data.duration, forward_edge.data.duration);
|
||||
}
|
||||
if (edges[i].data.backward)
|
||||
{
|
||||
reverse_edge.data.weight = std::min(edges[i].data.weight, reverse_edge.data.weight);
|
||||
reverse_edge.data.duration =
|
||||
std::min(edges[i].data.duration, reverse_edge.data.duration);
|
||||
}
|
||||
++i;
|
||||
output_edges.push_back(OutputEdgeT{source, target, first_backward->data});
|
||||
}
|
||||
// merge edges (s,t) and (t,s) into bidirectional edge
|
||||
if (forward_edge.data.weight == reverse_edge.data.weight)
|
||||
// only a forward edge, thanks to the sorting this is the smallest
|
||||
else if (first_backward == end_interval)
|
||||
{
|
||||
if ((int)forward_edge.data.weight != INVALID_EDGE_WEIGHT)
|
||||
{
|
||||
forward_edge.data.backward = true;
|
||||
graph_edges.push_back(forward_edge);
|
||||
}
|
||||
output_edges.push_back(OutputEdgeT{source, target, begin_interval->data});
|
||||
}
|
||||
// we have both a forward and a backward edge, we need to evaluate
|
||||
// if we can merge them
|
||||
else
|
||||
{ // insert separate edges
|
||||
if (((int)forward_edge.data.weight) != INVALID_EDGE_WEIGHT)
|
||||
{
|
||||
BOOST_ASSERT(begin_interval->data.forward);
|
||||
BOOST_ASSERT(first_backward->data.backward);
|
||||
BOOST_ASSERT(first_backward != end_interval);
|
||||
|
||||
// same weight, so we can just merge them
|
||||
if (begin_interval->data.weight == first_backward->data.weight)
|
||||
{
|
||||
graph_edges.push_back(forward_edge);
|
||||
OutputEdgeT merged{source, target, begin_interval->data};
|
||||
merged.data.backward = true;
|
||||
output_edges.push_back(std::move(merged));
|
||||
}
|
||||
if ((int)reverse_edge.data.weight != INVALID_EDGE_WEIGHT)
|
||||
// we need to insert separate forward and reverse edges
|
||||
else
|
||||
{
|
||||
graph_edges.push_back(reverse_edge);
|
||||
output_edges.push_back(OutputEdgeT{source, target, begin_interval->data});
|
||||
output_edges.push_back(OutputEdgeT{source, target, first_backward->data});
|
||||
}
|
||||
}
|
||||
|
||||
begin_interval = end_interval;
|
||||
}
|
||||
|
||||
return graph_edges;
|
||||
return output_edges;
|
||||
}
|
||||
|
||||
struct EdgeBasedGraphReader
|
||||
std::vector<extractor::EdgeBasedEdge> graphToEdges(const DynamicEdgeBasedGraph &edge_based_graph)
|
||||
{
|
||||
EdgeBasedGraphReader(storage::io::FileReader &reader)
|
||||
{
|
||||
// Reads: | Fingerprint | #e | max_eid | edges |
|
||||
// - uint64: number of edges
|
||||
// - EdgeID: max edge id
|
||||
// - extractor::EdgeBasedEdge edges
|
||||
//
|
||||
// Gets written in Extractor::WriteEdgeBasedGraph
|
||||
auto range = tbb::blocked_range<NodeID>(0, edge_based_graph.GetNumberOfNodes());
|
||||
auto max_turn_id =
|
||||
tbb::parallel_reduce(range,
|
||||
NodeID{0},
|
||||
[&edge_based_graph](const auto range, NodeID initial) {
|
||||
NodeID max_turn_id = initial;
|
||||
for (auto node = range.begin(); node < range.end(); ++node)
|
||||
{
|
||||
for (auto edge : edge_based_graph.GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const auto &data = edge_based_graph.GetEdgeData(edge);
|
||||
max_turn_id = std::max(max_turn_id, data.turn_id);
|
||||
}
|
||||
}
|
||||
return max_turn_id;
|
||||
},
|
||||
[](const NodeID lhs, const NodeID rhs) { return std::max(lhs, rhs); });
|
||||
|
||||
const auto num_edges = reader.ReadElementCount64();
|
||||
const auto max_edge_id = reader.ReadOne<EdgeID>();
|
||||
std::vector<extractor::EdgeBasedEdge> edges(max_turn_id + 1);
|
||||
tbb::parallel_for(range, [&](const auto range) {
|
||||
for (auto node = range.begin(); node < range.end(); ++node)
|
||||
{
|
||||
for (auto edge : edge_based_graph.GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const auto &data = edge_based_graph.GetEdgeData(edge);
|
||||
// we only need to save the forward edges, since the read method will
|
||||
// convert from forward to bi-directional edges again
|
||||
if (data.forward)
|
||||
{
|
||||
auto target = edge_based_graph.GetTarget(edge);
|
||||
BOOST_ASSERT(data.turn_id <= max_turn_id);
|
||||
edges[data.turn_id] = extractor::EdgeBasedEdge{node, target, data};
|
||||
// only save the forward edge
|
||||
edges[data.turn_id].data.forward = true;
|
||||
edges[data.turn_id].data.backward = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
num_nodes = max_edge_id + 1;
|
||||
return edges;
|
||||
}
|
||||
|
||||
edges.resize(num_edges);
|
||||
reader.ReadInto(edges);
|
||||
}
|
||||
|
||||
// FIXME: wrapped in unique_ptr since dynamic_graph is not move-able
|
||||
|
||||
std::unique_ptr<DynamicEdgeBasedGraph> BuildEdgeBasedGraph()
|
||||
{
|
||||
// FIXME: The following is a rough adaption from:
|
||||
// - adaptToContractorInput
|
||||
// - GraphContractor::GraphContractor
|
||||
// and should really be abstracted over.
|
||||
// FIXME: edges passed as a const reference, could be changed to pass-by-value if it can be moved
|
||||
|
||||
auto directed = splitBidirectionalEdges(edges);
|
||||
auto tidied = prepareEdgesForUsageInGraph<DynamicEdgeBasedGraphEdge>(std::move(directed));
|
||||
|
||||
return std::make_unique<DynamicEdgeBasedGraph>(num_nodes, std::move(tidied));
|
||||
}
|
||||
|
||||
private:
|
||||
inline DynamicEdgeBasedGraph LoadEdgeBasedGraph(const boost::filesystem::path &path)
|
||||
{
|
||||
EdgeID max_node_id;
|
||||
std::vector<extractor::EdgeBasedEdge> edges;
|
||||
std::size_t num_nodes;
|
||||
};
|
||||
extractor::files::readEdgeBasedGraph(path, max_node_id, edges);
|
||||
|
||||
inline std::unique_ptr<DynamicEdgeBasedGraph> LoadEdgeBasedGraph(const std::string &path)
|
||||
{
|
||||
const auto fingerprint = storage::io::FileReader::VerifyFingerprint;
|
||||
storage::io::FileReader reader(path, fingerprint);
|
||||
auto directed = splitBidirectionalEdges(edges);
|
||||
auto tidied = prepareEdgesForUsageInGraph<DynamicEdgeBasedGraphEdge>(std::move(directed));
|
||||
|
||||
EdgeBasedGraphReader builder{reader};
|
||||
|
||||
return builder.BuildEdgeBasedGraph();
|
||||
return DynamicEdgeBasedGraph(max_node_id + 1, std::move(tidied));
|
||||
}
|
||||
|
||||
} // ns partition
|
||||
|
@ -38,9 +38,11 @@ struct PartitionConfig
|
||||
edge_based_graph_path = basepath + ".osrm.ebg";
|
||||
compressed_node_based_graph_path = basepath + ".osrm.cnbg";
|
||||
cnbg_ebg_mapping_path = basepath + ".osrm.cnbg_to_ebg";
|
||||
file_index_path = basepath + ".osrm.fileIndex";
|
||||
partition_path = basepath + ".osrm.partition";
|
||||
mld_partition_path = basepath + ".osrm.partition";
|
||||
mld_storage_path = basepath + ".osrm.cells";
|
||||
storage_path = basepath + ".osrm.cells";
|
||||
node_data_path = basepath + ".osrm.ebg_nodes";
|
||||
hsgr_path = basepath + ".osrm.hsgr";
|
||||
}
|
||||
|
||||
// might be changed to the node based graph at some point
|
||||
@ -49,8 +51,10 @@ struct PartitionConfig
|
||||
boost::filesystem::path compressed_node_based_graph_path;
|
||||
boost::filesystem::path cnbg_ebg_mapping_path;
|
||||
boost::filesystem::path partition_path;
|
||||
boost::filesystem::path mld_partition_path;
|
||||
boost::filesystem::path mld_storage_path;
|
||||
boost::filesystem::path file_index_path;
|
||||
boost::filesystem::path storage_path;
|
||||
boost::filesystem::path node_data_path;
|
||||
boost::filesystem::path hsgr_path;
|
||||
|
||||
unsigned requested_num_threads;
|
||||
|
||||
|
65
include/partition/renumber.hpp
Normal file
@ -0,0 +1,65 @@
|
||||
#ifndef OSRM_PARTITION_RENUMBER_HPP
|
||||
#define OSRM_PARTITION_RENUMBER_HPP
|
||||
|
||||
#include "extractor/edge_based_node_segment.hpp"
|
||||
#include "extractor/node_data_container.hpp"
|
||||
|
||||
#include "partition/bisection_to_partition.hpp"
|
||||
#include "partition/edge_based_graph.hpp"
|
||||
|
||||
#include "util/dynamic_graph.hpp"
|
||||
#include "util/static_graph.hpp"
|
||||
|
||||
namespace osrm
|
||||
{
|
||||
namespace partition
|
||||
{
|
||||
std::vector<std::uint32_t> makePermutation(const DynamicEdgeBasedGraph &graph,
|
||||
const std::vector<Partition> &partitions);
|
||||
|
||||
template <typename EdgeDataT>
|
||||
inline void renumber(util::DynamicGraph<EdgeDataT> &graph,
|
||||
const std::vector<std::uint32_t> &permutation)
|
||||
{
|
||||
// dynamic graph has its own specialization
|
||||
graph.Renumber(permutation);
|
||||
}
|
||||
|
||||
template <typename EdgeDataT>
|
||||
inline void renumber(util::StaticGraph<EdgeDataT> &graph,
|
||||
const std::vector<std::uint32_t> &permutation)
|
||||
{
|
||||
// static graph has its own specialization
|
||||
graph.Renumber(permutation);
|
||||
}
|
||||
|
||||
inline void renumber(extractor::EdgeBasedNodeDataContainer &node_data_container,
|
||||
const std::vector<std::uint32_t> &permutation)
|
||||
{
|
||||
node_data_container.Renumber(permutation);
|
||||
}
|
||||
|
||||
inline void renumber(std::vector<Partition> &partitions,
|
||||
const std::vector<std::uint32_t> &permutation)
|
||||
{
|
||||
for (auto &partition : partitions)
|
||||
{
|
||||
util::inplacePermutation(partition.begin(), partition.end(), permutation);
|
||||
}
|
||||
}
|
||||
|
||||
inline void renumber(util::vector_view<extractor::EdgeBasedNodeSegment> &segments,
|
||||
const std::vector<std::uint32_t> &permutation)
|
||||
{
|
||||
for (auto &segment : segments)
|
||||
{
|
||||
BOOST_ASSERT(segment.forward_segment_id.enabled);
|
||||
segment.forward_segment_id.id = permutation[segment.forward_segment_id.id];
|
||||
if (segment.reverse_segment_id.enabled)
|
||||
segment.reverse_segment_id.id = permutation[segment.reverse_segment_id.id];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
@ -1,6 +1,7 @@
|
||||
#ifndef OSRM_STORAGE_SERIALIZATION_HPP
|
||||
#define OSRM_STORAGE_SERIALIZATION_HPP
|
||||
|
||||
#include "util/deallocating_vector.hpp"
|
||||
#include "util/integer_range.hpp"
|
||||
#include "util/vector_view.hpp"
|
||||
|
||||
@ -14,6 +15,49 @@ namespace storage
|
||||
{
|
||||
namespace serialization
|
||||
{
|
||||
|
||||
/* All vector formats here use the same on-disk format.
|
||||
* This is important because we want to be able to write from a vector
|
||||
* of one kind, but read it into a vector of another kind.
|
||||
*
|
||||
* All vector types with this guarantee should be placed in this file.
|
||||
*/
|
||||
|
||||
template <typename T>
|
||||
inline void read(storage::io::FileReader &reader, util::DeallocatingVector<T> &vec)
|
||||
{
|
||||
vec.current_size = reader.ReadElementCount64(vec.current_size);
|
||||
std::size_t num_blocks =
|
||||
std::ceil(vec.current_size / util::DeallocatingVector<T>::ELEMENTS_PER_BLOCK);
|
||||
vec.bucket_list.resize(num_blocks);
|
||||
// Read all but the last block, which can be partially filled
|
||||
for (auto bucket_index : util::irange<std::size_t>(0, num_blocks - 1))
|
||||
{
|
||||
vec.bucket_list[bucket_index] = new T[util::DeallocatingVector<T>::ELEMENTS_PER_BLOCK];
|
||||
reader.ReadInto(vec.bucket_list[bucket_index],
|
||||
util::DeallocatingVector<T>::ELEMENTS_PER_BLOCK);
|
||||
}
|
||||
std::size_t last_block_size =
|
||||
vec.current_size % util::DeallocatingVector<T>::ELEMENTS_PER_BLOCK;
|
||||
vec.bucket_list.back() = new T[util::DeallocatingVector<T>::ELEMENTS_PER_BLOCK];
|
||||
reader.ReadInto(vec.bucket_list.back(), last_block_size);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline void write(storage::io::FileWriter &writer, const util::DeallocatingVector<T> &vec)
|
||||
{
|
||||
writer.WriteElementCount64(vec.current_size);
|
||||
// Write all but the last block which can be partially filled
|
||||
for (auto bucket_index : util::irange<std::size_t>(0, vec.bucket_list.size() - 1))
|
||||
{
|
||||
writer.WriteFrom(vec.bucket_list[bucket_index],
|
||||
util::DeallocatingVector<T>::ELEMENTS_PER_BLOCK);
|
||||
}
|
||||
std::size_t last_block_size =
|
||||
vec.current_size % util::DeallocatingVector<T>::ELEMENTS_PER_BLOCK;
|
||||
writer.WriteFrom(vec.bucket_list.back(), last_block_size);
|
||||
}
|
||||
|
||||
template <typename T> inline void read(storage::io::FileReader &reader, stxxl::vector<T> &vec)
|
||||
{
|
||||
auto size = reader.ReadOne<std::uint64_t>();
|
||||
|
@ -1,6 +1,7 @@
|
||||
#ifndef DEALLOCATING_VECTOR_HPP
|
||||
#define DEALLOCATING_VECTOR_HPP
|
||||
|
||||
#include "storage/io_fwd.hpp"
|
||||
#include "util/integer_range.hpp"
|
||||
|
||||
#include <boost/iterator/iterator_facade.hpp>
|
||||
@ -13,7 +14,23 @@ namespace osrm
|
||||
{
|
||||
namespace util
|
||||
{
|
||||
template <typename ElementT> class DeallocatingVector;
|
||||
}
|
||||
|
||||
namespace storage
|
||||
{
|
||||
namespace serialization
|
||||
{
|
||||
template <typename T>
|
||||
inline void read(storage::io::FileReader &reader, util::DeallocatingVector<T> &vec);
|
||||
|
||||
template <typename T>
|
||||
inline void write(storage::io::FileWriter &writer, const util::DeallocatingVector<T> &vec);
|
||||
}
|
||||
}
|
||||
|
||||
namespace util
|
||||
{
|
||||
template <typename ElementT> struct ConstDeallocatingVectorIteratorState
|
||||
{
|
||||
ConstDeallocatingVectorIteratorState()
|
||||
@ -216,18 +233,16 @@ class DeallocatingVectorRemoveIterator
|
||||
}
|
||||
};
|
||||
|
||||
template <typename ElementT, std::size_t ELEMENTS_PER_BLOCK> class DeallocatingVector;
|
||||
template <typename T> void swap(DeallocatingVector<T> &lhs, DeallocatingVector<T> &rhs);
|
||||
|
||||
template <typename T, std::size_t S>
|
||||
void swap(DeallocatingVector<T, S> &lhs, DeallocatingVector<T, S> &rhs);
|
||||
|
||||
template <typename ElementT, std::size_t ELEMENTS_PER_BLOCK = 8388608 / sizeof(ElementT)>
|
||||
class DeallocatingVector
|
||||
template <typename ElementT> class DeallocatingVector
|
||||
{
|
||||
static constexpr std::size_t ELEMENTS_PER_BLOCK = 8388608 / sizeof(ElementT);
|
||||
std::size_t current_size;
|
||||
std::vector<ElementT *> bucket_list;
|
||||
|
||||
public:
|
||||
using value_type = ElementT;
|
||||
using iterator = DeallocatingVectorIterator<ElementT, ELEMENTS_PER_BLOCK>;
|
||||
using const_iterator = ConstDeallocatingVectorIterator<ElementT, ELEMENTS_PER_BLOCK>;
|
||||
|
||||
@ -248,10 +263,9 @@ class DeallocatingVector
|
||||
|
||||
~DeallocatingVector() { clear(); }
|
||||
|
||||
friend void swap<>(DeallocatingVector<ElementT, ELEMENTS_PER_BLOCK> &lhs,
|
||||
DeallocatingVector<ElementT, ELEMENTS_PER_BLOCK> &rhs);
|
||||
friend void swap<>(DeallocatingVector<ElementT> &lhs, DeallocatingVector<ElementT> &rhs);
|
||||
|
||||
void swap(DeallocatingVector<ElementT, ELEMENTS_PER_BLOCK> &other)
|
||||
void swap(DeallocatingVector<ElementT> &other)
|
||||
{
|
||||
std::swap(current_size, other.current_size);
|
||||
bucket_list.swap(other.bucket_list);
|
||||
@ -377,10 +391,14 @@ class DeallocatingVector
|
||||
++position;
|
||||
}
|
||||
}
|
||||
|
||||
friend void storage::serialization::read<ElementT>(storage::io::FileReader &reader,
|
||||
DeallocatingVector &vec);
|
||||
friend void storage::serialization::write<ElementT>(storage::io::FileWriter &writer,
|
||||
const DeallocatingVector &vec);
|
||||
};
|
||||
|
||||
template <typename T, std::size_t S>
|
||||
void swap(DeallocatingVector<T, S> &lhs, DeallocatingVector<T, S> &rhs)
|
||||
template <typename T> void swap(DeallocatingVector<T> &lhs, DeallocatingVector<T> &rhs)
|
||||
{
|
||||
lhs.swap(rhs);
|
||||
}
|
||||
|
@ -3,6 +3,7 @@
|
||||
|
||||
#include "util/deallocating_vector.hpp"
|
||||
#include "util/integer_range.hpp"
|
||||
#include "util/permutation.hpp"
|
||||
#include "util/typedefs.hpp"
|
||||
|
||||
#include "storage/io_fwd.hpp"
|
||||
@ -117,6 +118,28 @@ template <typename EdgeDataT> class DynamicGraph
|
||||
}
|
||||
}
|
||||
|
||||
DynamicGraph(DynamicGraph &&other)
|
||||
{
|
||||
number_of_nodes = other.number_of_nodes;
|
||||
// atomics can't be moved, which is why we need a custom move constructor
|
||||
number_of_edges = static_cast<std::uint32_t>(other.number_of_edges);
|
||||
|
||||
node_array = std::move(other.node_array);
|
||||
edge_list = std::move(other.edge_list);
|
||||
}
|
||||
|
||||
DynamicGraph &operator=(DynamicGraph &&other)
|
||||
{
|
||||
number_of_nodes = other.number_of_nodes;
|
||||
// atomics can't be moved, which is why we need a custom move assignment operator
|
||||
number_of_edges = static_cast<std::uint32_t>(other.number_of_edges);
|
||||
|
||||
node_array = std::move(other.node_array);
|
||||
edge_list = std::move(other.edge_list);
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
unsigned GetNumberOfNodes() const { return number_of_nodes; }
|
||||
|
||||
unsigned GetNumberOfEdges() const { return number_of_edges; }
|
||||
@ -309,6 +332,36 @@ template <typename EdgeDataT> class DynamicGraph
|
||||
return current_iterator;
|
||||
}
|
||||
|
||||
void Renumber(const std::vector<NodeID> &old_to_new_node)
|
||||
{
|
||||
// permute everything but the sentinel
|
||||
util::inplacePermutation(node_array.begin(), std::prev(node_array.end()), old_to_new_node);
|
||||
|
||||
// Build up edge permutation
|
||||
auto new_edge_index = 0;
|
||||
std::vector<EdgeID> old_to_new_edge(edge_list.size(), SPECIAL_EDGEID);
|
||||
for (auto node : util::irange<NodeID>(0, number_of_nodes))
|
||||
{
|
||||
auto new_first_edge = new_edge_index;
|
||||
// move all filled edges
|
||||
for (auto edge : GetAdjacentEdgeRange(node))
|
||||
{
|
||||
edge_list[edge].target = old_to_new_node[edge_list[edge].target];
|
||||
old_to_new_edge[edge] = new_edge_index++;
|
||||
}
|
||||
// and all adjacent empty edges
|
||||
for (auto edge = EndEdges(node); edge < number_of_edges && isDummy(edge); edge++)
|
||||
{
|
||||
old_to_new_edge[edge] = new_edge_index++;
|
||||
}
|
||||
node_array[node].first_edge = new_first_edge;
|
||||
}
|
||||
BOOST_ASSERT(std::find(old_to_new_edge.begin(), old_to_new_edge.end(), SPECIAL_EDGEID) ==
|
||||
old_to_new_edge.end());
|
||||
|
||||
util::inplacePermutation(edge_list.begin(), edge_list.end(), old_to_new_edge);
|
||||
}
|
||||
|
||||
protected:
|
||||
bool isDummy(const EdgeIterator edge) const
|
||||
{
|
||||
|
54
include/util/mmap_file.hpp
Normal file
@ -0,0 +1,54 @@
|
||||
#ifndef OSRM_UTIL_MMAP_FILE_HPP
|
||||
#define OSRM_UTIL_MMAP_FILE_HPP
|
||||
|
||||
#include "util/exception.hpp"
|
||||
#include "util/exception_utils.hpp"
|
||||
#include "util/vector_view.hpp"
|
||||
|
||||
#include <boost/filesystem/path.hpp>
|
||||
#include <boost/iostreams/device/mapped_file.hpp>
|
||||
|
||||
namespace osrm
|
||||
{
|
||||
namespace util
|
||||
{
|
||||
|
||||
namespace detail
|
||||
{
|
||||
template <typename T, typename RegionT>
|
||||
util::vector_view<T> mmapFile(const boost::filesystem::path &file, RegionT ®ion)
|
||||
{
|
||||
try
|
||||
{
|
||||
region.open(file);
|
||||
std::size_t num_objects = region.size() / sizeof(T);
|
||||
auto data_ptr = region.data();
|
||||
BOOST_ASSERT(reinterpret_cast<uintptr_t>(data_ptr) % alignof(T) == 0);
|
||||
return util::vector_view<T>(reinterpret_cast<T *>(data_ptr), num_objects);
|
||||
}
|
||||
catch (const std::exception &exc)
|
||||
{
|
||||
throw exception(
|
||||
boost::str(boost::format("File %1% mapping failed: %2%") % file % exc.what()) +
|
||||
SOURCE_REF);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
util::vector_view<const T> mmapFile(const boost::filesystem::path &file,
|
||||
boost::iostreams::mapped_file_source ®ion)
|
||||
{
|
||||
return detail::mmapFile<const T>(file, region);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
util::vector_view<T> mmapFile(const boost::filesystem::path &file,
|
||||
boost::iostreams::mapped_file ®ion)
|
||||
{
|
||||
return detail::mmapFile<T>(file, region);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
44
include/util/permutation.hpp
Normal file
@ -0,0 +1,44 @@
|
||||
#ifndef OSRM_UTIL_PERMUTATION_HPP
|
||||
#define OSRM_UTIL_PERMUTATION_HPP
|
||||
|
||||
#include "util/integer_range.hpp"
|
||||
|
||||
#include <vector>
|
||||
|
||||
namespace osrm
|
||||
{
|
||||
namespace util
|
||||
{
|
||||
|
||||
template <typename RandomAccesIterator, typename IndexT>
|
||||
void inplacePermutation(RandomAccesIterator begin,
|
||||
RandomAccesIterator end,
|
||||
const std::vector<IndexT> &old_to_new)
|
||||
{
|
||||
std::size_t size = std::distance(begin, end);
|
||||
BOOST_ASSERT(old_to_new.size() == size);
|
||||
// we need a little bit of auxiliary space since we need to mark
|
||||
// replaced elements in a non-destructive way
|
||||
std::vector<bool> was_replaced(size, false);
|
||||
for (auto index : util::irange<IndexT>(0, size))
|
||||
{
|
||||
if (was_replaced[index])
|
||||
continue;
|
||||
|
||||
// iterate over a cycle in the permutation
|
||||
auto buffer = begin[index];
|
||||
auto old_index = index;
|
||||
auto new_index = old_to_new[old_index];
|
||||
for (; new_index != index; old_index = new_index, new_index = old_to_new[new_index])
|
||||
{
|
||||
was_replaced[old_index] = true;
|
||||
std::swap(buffer, begin[new_index]);
|
||||
}
|
||||
was_replaced[old_index] = true;
|
||||
std::swap(buffer, begin[index]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
@ -34,6 +34,8 @@ inline void read(storage::io::FileReader &reader, StaticGraph<EdgeDataT, Ownersh
|
||||
{
|
||||
storage::serialization::read(reader, graph.node_array);
|
||||
storage::serialization::read(reader, graph.edge_array);
|
||||
graph.number_of_nodes = graph.node_array.size() - 1;
|
||||
graph.number_of_edges = graph.edge_array.size();
|
||||
}
|
||||
|
||||
template <typename EdgeDataT, storage::Ownership Ownership>
|
||||
|
@ -4,6 +4,7 @@
|
||||
#include "util/graph_traits.hpp"
|
||||
#include "util/integer_range.hpp"
|
||||
#include "util/percent.hpp"
|
||||
#include "util/permutation.hpp"
|
||||
#include "util/typedefs.hpp"
|
||||
#include "util/vector_view.hpp"
|
||||
|
||||
@ -146,6 +147,7 @@ class StaticGraph
|
||||
number_of_nodes = static_cast<decltype(number_of_nodes)>(node_array.size() - 1);
|
||||
number_of_edges = static_cast<decltype(number_of_edges)>(node_array.back().first_edge);
|
||||
BOOST_ASSERT(number_of_edges <= edge_array.size());
|
||||
BOOST_ASSERT(number_of_nodes == node_array.size() - 1);
|
||||
}
|
||||
|
||||
unsigned GetNumberOfNodes() const { return number_of_nodes; }
|
||||
@ -241,6 +243,35 @@ class StaticGraph
|
||||
return current_iterator;
|
||||
}
|
||||
|
||||
void Renumber(const std::vector<NodeID> &old_to_new_node)
|
||||
{
|
||||
std::vector<NodeID> new_to_old_node(number_of_nodes);
|
||||
for (auto node : util::irange<NodeID>(0, number_of_nodes))
|
||||
new_to_old_node[old_to_new_node[node]] = node;
|
||||
|
||||
Vector<NodeArrayEntry> new_node_array(node_array.size());
|
||||
|
||||
// Build up edge permutation
|
||||
auto new_edge_index = 0;
|
||||
std::vector<EdgeID> old_to_new_edge(edge_array.size(), SPECIAL_EDGEID);
|
||||
for (auto node : util::irange<NodeID>(0, number_of_nodes))
|
||||
{
|
||||
auto new_first_edge = new_edge_index;
|
||||
for (auto edge : GetAdjacentEdgeRange(new_to_old_node[node]))
|
||||
{
|
||||
edge_array[edge].target = old_to_new_node[edge_array[edge].target];
|
||||
old_to_new_edge[edge] = new_edge_index++;
|
||||
}
|
||||
new_node_array[node].first_edge = new_first_edge;
|
||||
}
|
||||
new_node_array.back().first_edge = new_edge_index;
|
||||
node_array = std::move(new_node_array);
|
||||
BOOST_ASSERT(std::find(old_to_new_edge.begin(), old_to_new_edge.end(), SPECIAL_EDGEID) ==
|
||||
old_to_new_edge.end());
|
||||
|
||||
util::inplacePermutation(edge_array.begin(), edge_array.end(), old_to_new_edge);
|
||||
}
|
||||
|
||||
friend void serialization::read<EdgeDataT, Ownership>(storage::io::FileReader &reader,
|
||||
StaticGraph<EdgeDataT, Ownership> &graph);
|
||||
friend void
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include "util/exception.hpp"
|
||||
#include "util/hilbert_value.hpp"
|
||||
#include "util/integer_range.hpp"
|
||||
#include "util/mmap_file.hpp"
|
||||
#include "util/rectangle.hpp"
|
||||
#include "util/typedefs.hpp"
|
||||
#include "util/vector_view.hpp"
|
||||
@ -456,9 +457,8 @@ class StaticRTree
|
||||
tree_node_file.WriteOne(static_cast<std::uint64_t>(m_tree_level_sizes.size()));
|
||||
tree_node_file.WriteFrom(m_tree_level_sizes);
|
||||
}
|
||||
// Map the leaf nodes file so that the r-tree object is immediately usable (i.e. the
|
||||
// constructor doesn't just build and serialize the tree, it gives us a usable r-tree).
|
||||
MapLeafNodesFile(leaf_node_filename);
|
||||
|
||||
m_objects = mmapFile<EdgeDataT>(leaf_node_filename, m_objects_region);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -488,7 +488,7 @@ class StaticRTree
|
||||
m_tree_level_sizes.end() - 1,
|
||||
std::back_inserter(m_tree_level_starts));
|
||||
|
||||
MapLeafNodesFile(leaf_file);
|
||||
m_objects = mmapFile<EdgeDataT>(leaf_file, m_objects_region);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -512,40 +512,7 @@ class StaticRTree
|
||||
std::partial_sum(m_tree_level_sizes.begin(),
|
||||
m_tree_level_sizes.end() - 1,
|
||||
std::back_inserter(m_tree_level_starts));
|
||||
MapLeafNodesFile(leaf_file);
|
||||
}
|
||||
|
||||
/**
|
||||
* mmap()s the .fileIndex file and wraps it in a read-only vector_view object
|
||||
* for easy access.
|
||||
*/
|
||||
void MapLeafNodesFile(const boost::filesystem::path &leaf_file)
|
||||
{
|
||||
// open leaf node file and return a pointer to the mapped leaves data
|
||||
try
|
||||
{
|
||||
m_objects_region.open(leaf_file);
|
||||
std::size_t num_objects = m_objects_region.size() / sizeof(EdgeDataT);
|
||||
auto data_ptr = m_objects_region.data();
|
||||
BOOST_ASSERT(reinterpret_cast<uintptr_t>(data_ptr) % alignof(EdgeDataT) == 0);
|
||||
BOOST_ASSERT(m_search_tree.size() > 0);
|
||||
BOOST_ASSERT(m_tree_level_sizes.size() > 0);
|
||||
// Verify that there are at least enough objects to fill the bottom of the leaf nodes
|
||||
// This is a rough check for correct file length. It's not strictly correct, it
|
||||
// misses the last LEAF_NODE_SIZE-1 nodes, but it should generally be good enough
|
||||
// to catch most problems. The second test is for when the m_objects array is perfectly
|
||||
// filled and has a size that is dividable by LEAF_NODE_SIZE without a remainder
|
||||
BOOST_ASSERT(m_tree_level_sizes.back() - 1 ==
|
||||
std::floor(num_objects / LEAF_NODE_SIZE) ||
|
||||
m_tree_level_sizes.back() == std::floor(num_objects / LEAF_NODE_SIZE));
|
||||
m_objects.reset(reinterpret_cast<const EdgeDataT *>(data_ptr), num_objects);
|
||||
}
|
||||
catch (const std::exception &exc)
|
||||
{
|
||||
throw exception(boost::str(boost::format("Leaf file %1% mapping failed: %2%") %
|
||||
leaf_file % exc.what()) +
|
||||
SOURCE_REF);
|
||||
}
|
||||
m_objects = mmapFile<EdgeDataT>(leaf_file, m_objects_region);
|
||||
}
|
||||
|
||||
/* Returns all features inside the bounding box.
|
||||
|
94
include/util/timed_histogram.hpp
Normal file
@ -0,0 +1,94 @@
|
||||
#ifndef OSRM_UTIL_TIMED_HISTOGRAM_HPP
|
||||
#define OSRM_UTIL_TIMED_HISTOGRAM_HPP
|
||||
|
||||
#include "util/integer_range.hpp"
|
||||
|
||||
#include <algorithm>
|
||||
#include <atomic>
|
||||
#include <mutex>
|
||||
#include <sstream>
|
||||
#include <vector>
|
||||
|
||||
namespace osrm
|
||||
{
|
||||
namespace util
|
||||
{
|
||||
namespace detail
|
||||
{
|
||||
extern std::atomic_uint operation;
|
||||
}
|
||||
|
||||
/**
|
||||
* Captures a histogram with a bin size of `IndexBinSize` every `TimeBinSize` calls to `Count`.
|
||||
*/
|
||||
template <std::size_t TimeBinSize = 1000, std::size_t IndexBinSize = 1000> class TimedHistogram
|
||||
{
|
||||
public:
|
||||
void Count(std::size_t pos)
|
||||
{
|
||||
std::lock_guard<std::mutex> guard(frames_lock);
|
||||
auto frame_index = detail::operation++ / TimeBinSize;
|
||||
|
||||
while (frame_offsets.size() <= frame_index)
|
||||
{
|
||||
frame_offsets.push_back(frame_counters.size());
|
||||
}
|
||||
BOOST_ASSERT(frame_offsets.size() == frame_index + 1);
|
||||
|
||||
auto frame_offset = frame_offsets.back();
|
||||
auto counter_index = frame_offset + pos / IndexBinSize;
|
||||
|
||||
while (counter_index >= frame_counters.size())
|
||||
{
|
||||
frame_counters.push_back(0);
|
||||
}
|
||||
|
||||
BOOST_ASSERT(frame_counters.size() > counter_index);
|
||||
frame_counters[counter_index]++;
|
||||
}
|
||||
|
||||
// Returns the measurements as a CSV file with the columns:
|
||||
// frame_id,index_bin,count
|
||||
std::string DumpCSV() const
|
||||
{
|
||||
std::stringstream out;
|
||||
|
||||
const auto print_bins = [&out](auto frame_index, auto begin, auto end) {
|
||||
auto bin_index = 0;
|
||||
std::for_each(begin, end, [&](const auto count) {
|
||||
if (count > 0)
|
||||
{
|
||||
out << (frame_index * TimeBinSize) << "," << (bin_index * IndexBinSize) << ","
|
||||
<< count << std::endl;
|
||||
}
|
||||
bin_index++;
|
||||
});
|
||||
};
|
||||
|
||||
if (frame_offsets.size() == 0)
|
||||
{
|
||||
return "";
|
||||
}
|
||||
|
||||
for (const auto frame_index : irange<std::size_t>(0, frame_offsets.size() - 1))
|
||||
{
|
||||
auto begin = frame_counters.begin() + frame_offsets[frame_index];
|
||||
auto end = frame_counters.begin() + frame_offsets[frame_index + 1];
|
||||
print_bins(frame_index, begin, end);
|
||||
}
|
||||
print_bins(frame_offsets.size() - 1,
|
||||
frame_counters.begin() + frame_offsets.back(),
|
||||
frame_counters.end());
|
||||
|
||||
return out.str();
|
||||
}
|
||||
|
||||
private:
|
||||
std::mutex frames_lock;
|
||||
std::vector<std::uint32_t> frame_offsets;
|
||||
std::vector<std::uint32_t> frame_counters;
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
@ -178,7 +178,13 @@ int Extractor::run(ScriptingEnvironment &scripting_environment)
|
||||
files::writeNodes(config.node_based_nodes_data_path, coordinates, osm_node_ids);
|
||||
files::writeNodeData(config.edge_based_nodes_data_path, edge_based_nodes_container);
|
||||
|
||||
WriteEdgeBasedGraph(config.edge_graph_output_path, max_edge_id, edge_based_edge_list);
|
||||
util::Log() << "Writing edge-based-graph edges ... " << std::flush;
|
||||
TIMER_START(write_edges);
|
||||
files::writeEdgeBasedGraph(config.edge_graph_output_path, max_edge_id, edge_based_edge_list);
|
||||
TIMER_STOP(write_edges);
|
||||
util::Log() << "ok, after " << TIMER_SEC(write_edges) << "s";
|
||||
|
||||
util::Log() << "Processed " << edge_based_edge_list.size() << " edges";
|
||||
|
||||
const auto nodes_per_second =
|
||||
static_cast<std::uint64_t>(number_of_node_based_nodes / TIMER_SEC(expansion));
|
||||
@ -578,32 +584,6 @@ void Extractor::BuildRTree(std::vector<EdgeBasedNodeSegment> edge_based_node_seg
|
||||
util::Log() << "finished r-tree construction in " << TIMER_SEC(construction) << " seconds";
|
||||
}
|
||||
|
||||
void Extractor::WriteEdgeBasedGraph(
|
||||
std::string const &output_file_filename,
|
||||
EdgeID const max_edge_id,
|
||||
util::DeallocatingVector<EdgeBasedEdge> const &edge_based_edge_list)
|
||||
{
|
||||
storage::io::FileWriter file(output_file_filename,
|
||||
storage::io::FileWriter::GenerateFingerprint);
|
||||
|
||||
util::Log() << "Writing edge-based-graph edges ... " << std::flush;
|
||||
TIMER_START(write_edges);
|
||||
|
||||
std::uint64_t number_of_used_edges = edge_based_edge_list.size();
|
||||
file.WriteElementCount64(number_of_used_edges);
|
||||
file.WriteOne(max_edge_id);
|
||||
|
||||
for (const auto &edge : edge_based_edge_list)
|
||||
{
|
||||
file.WriteOne(edge);
|
||||
}
|
||||
|
||||
TIMER_STOP(write_edges);
|
||||
util::Log() << "ok, after " << TIMER_SEC(write_edges) << "s";
|
||||
|
||||
util::Log() << "Processed " << number_of_used_edges << " edges";
|
||||
}
|
||||
|
||||
void Extractor::WriteIntersectionClassificationData(
|
||||
const std::string &output_file_name,
|
||||
const std::vector<BearingClassID> &node_based_intersection_classes,
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include "partition/multi_level_partition.hpp"
|
||||
#include "partition/recursive_bisection.hpp"
|
||||
#include "partition/remove_unconnected.hpp"
|
||||
#include "partition/renumber.hpp"
|
||||
|
||||
#include "extractor/files.hpp"
|
||||
|
||||
@ -17,12 +18,14 @@
|
||||
#include "util/integer_range.hpp"
|
||||
#include "util/json_container.hpp"
|
||||
#include "util/log.hpp"
|
||||
#include "util/mmap_file.hpp"
|
||||
|
||||
#include <algorithm>
|
||||
#include <iterator>
|
||||
#include <vector>
|
||||
|
||||
#include <boost/assert.hpp>
|
||||
#include <boost/filesystem/operations.hpp>
|
||||
|
||||
#include "util/geojson_debug_logger.hpp"
|
||||
#include "util/geojson_debug_policies.hpp"
|
||||
@ -129,10 +132,10 @@ int Partitioner::Run(const PartitionConfig &config)
|
||||
extractor::files::readNBGMapping(config.cnbg_ebg_mapping_path.string(), mapping);
|
||||
util::Log() << "Loaded node based graph to edge based graph mapping";
|
||||
|
||||
auto edge_based_graph = LoadEdgeBasedGraph(config.edge_based_graph_path.string());
|
||||
auto edge_based_graph = LoadEdgeBasedGraph(config.edge_based_graph_path);
|
||||
util::Log() << "Loaded edge based graph for mapping partition ids: "
|
||||
<< edge_based_graph->GetNumberOfEdges() << " edges, "
|
||||
<< edge_based_graph->GetNumberOfNodes() << " nodes";
|
||||
<< edge_based_graph.GetNumberOfEdges() << " edges, "
|
||||
<< edge_based_graph.GetNumberOfNodes() << " nodes";
|
||||
|
||||
// TODO: node based graph to edge based graph partition id mapping should be done split off.
|
||||
|
||||
@ -140,7 +143,7 @@ int Partitioner::Run(const PartitionConfig &config)
|
||||
const auto &node_based_partition_ids = recursive_bisection.BisectionIDs();
|
||||
|
||||
// Partition ids, keyed by edge based graph nodes
|
||||
std::vector<NodeID> edge_based_partition_ids(edge_based_graph->GetNumberOfNodes(),
|
||||
std::vector<NodeID> edge_based_partition_ids(edge_based_graph.GetNumberOfNodes(),
|
||||
SPECIAL_NODEID);
|
||||
|
||||
// Only resolve all easy cases in the first pass
|
||||
@ -163,7 +166,7 @@ int Partitioner::Run(const PartitionConfig &config)
|
||||
std::tie(partitions, level_to_num_cells) =
|
||||
bisectionToPartition(edge_based_partition_ids, config.max_cell_sizes);
|
||||
|
||||
auto num_unconnected = removeUnconnectedBoundaryNodes(*edge_based_graph, partitions);
|
||||
auto num_unconnected = removeUnconnectedBoundaryNodes(edge_based_graph, partitions);
|
||||
util::Log() << "Fixed " << num_unconnected << " unconnected nodes";
|
||||
|
||||
util::Log() << "Edge-based-graph annotation:";
|
||||
@ -173,19 +176,47 @@ int Partitioner::Run(const PartitionConfig &config)
|
||||
<< " bit size " << std::ceil(std::log2(level_to_num_cells[level] + 1));
|
||||
}
|
||||
|
||||
TIMER_START(renumber);
|
||||
auto permutation = makePermutation(edge_based_graph, partitions);
|
||||
renumber(edge_based_graph, permutation);
|
||||
renumber(partitions, permutation);
|
||||
{
|
||||
boost::iostreams::mapped_file segment_region;
|
||||
auto segments =
|
||||
util::mmapFile<extractor::EdgeBasedNodeSegment>(config.file_index_path, segment_region);
|
||||
renumber(segments, permutation);
|
||||
}
|
||||
{
|
||||
extractor::EdgeBasedNodeDataContainer node_data;
|
||||
extractor::files::readNodeData(config.node_data_path, node_data);
|
||||
renumber(node_data, permutation);
|
||||
extractor::files::writeNodeData(config.node_data_path, node_data);
|
||||
}
|
||||
if (boost::filesystem::exists(config.hsgr_path))
|
||||
{
|
||||
util::Log(logWARNING) << "Found existing .osrm.hsgr file, removing. You need to re-run "
|
||||
"osrm-contract after osrm-partition.";
|
||||
boost::filesystem::remove(config.hsgr_path);
|
||||
}
|
||||
TIMER_STOP(renumber);
|
||||
util::Log() << "Renumbered data in " << TIMER_SEC(renumber) << " seconds";
|
||||
|
||||
TIMER_START(packed_mlp);
|
||||
MultiLevelPartition mlp{partitions, level_to_num_cells};
|
||||
TIMER_STOP(packed_mlp);
|
||||
util::Log() << "MultiLevelPartition constructed in " << TIMER_SEC(packed_mlp) << " seconds";
|
||||
|
||||
TIMER_START(cell_storage);
|
||||
CellStorage storage(mlp, *edge_based_graph);
|
||||
CellStorage storage(mlp, edge_based_graph);
|
||||
TIMER_STOP(cell_storage);
|
||||
util::Log() << "CellStorage constructed in " << TIMER_SEC(cell_storage) << " seconds";
|
||||
|
||||
TIMER_START(writing_mld_data);
|
||||
files::writePartition(config.mld_partition_path, mlp);
|
||||
files::writeCells(config.mld_storage_path, storage);
|
||||
files::writePartition(config.partition_path, mlp);
|
||||
files::writeCells(config.storage_path, storage);
|
||||
extractor::files::writeEdgeBasedGraph(config.edge_based_graph_path,
|
||||
edge_based_graph.GetNumberOfNodes() - 1,
|
||||
graphToEdges(edge_based_graph));
|
||||
TIMER_STOP(writing_mld_data);
|
||||
util::Log() << "MLD data writing took " << TIMER_SEC(writing_mld_data) << " seconds";
|
||||
|
||||
|
74
src/partition/renumber.cpp
Normal file
@ -0,0 +1,74 @@
|
||||
#include "partition/renumber.hpp"
|
||||
|
||||
#include <tbb/parallel_sort.h>
|
||||
|
||||
namespace osrm
|
||||
{
|
||||
namespace partition
|
||||
{
|
||||
namespace
|
||||
{
|
||||
// Returns a vector that is indexed by node ID marking the level at which it is a border node
|
||||
std::vector<LevelID> getHighestBorderLevel(const DynamicEdgeBasedGraph &graph,
|
||||
const std::vector<Partition> &partitions)
|
||||
{
|
||||
std::vector<LevelID> border_level(graph.GetNumberOfNodes(), 0);
|
||||
|
||||
for (const auto level : util::irange<LevelID>(1, partitions.size() + 1))
|
||||
{
|
||||
const auto &partition = partitions[level - 1];
|
||||
for (auto node : util::irange<NodeID>(0, graph.GetNumberOfNodes()))
|
||||
{
|
||||
for (auto edge : graph.GetAdjacentEdgeRange(node))
|
||||
{
|
||||
auto target = graph.GetTarget(edge);
|
||||
if (partition[node] != partition[target])
|
||||
{
|
||||
// level is monotonically increasing so we will
|
||||
// always overwrite here with a value equal
|
||||
// or greater than the current border_level
|
||||
border_level[node] = level;
|
||||
border_level[target] = level;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return border_level;
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<std::uint32_t> makePermutation(const DynamicEdgeBasedGraph &graph,
|
||||
const std::vector<Partition> &partitions)
|
||||
{
|
||||
std::vector<std::uint32_t> ordering(graph.GetNumberOfNodes());
|
||||
std::iota(ordering.begin(), ordering.end(), 0);
|
||||
|
||||
// Sort the nodes by cell ID recursively:
|
||||
// Nodes in the same cell will be sorted by cell ID on the level below
|
||||
for (const auto &partition : partitions)
|
||||
{
|
||||
std::stable_sort(
|
||||
ordering.begin(), ordering.end(), [&partition](const auto lhs, const auto rhs) {
|
||||
return partition[lhs] < partition[rhs];
|
||||
});
|
||||
}
|
||||
|
||||
// Now sort the nodes by the level at which they are a border node, descending.
|
||||
// That means nodes that are border nodes on the highest level will have a very low ID,
|
||||
// whereas nodes that are never border nodes are sorted to the end of the array.
|
||||
// Note: we use a stable sort, so the cell ordering within each level is preserved.
|
||||
auto border_level = getHighestBorderLevel(graph, partitions);
|
||||
std::stable_sort(
|
||||
ordering.begin(), ordering.end(), [&border_level](const auto lhs, const auto rhs) {
|
||||
return border_level[lhs] > border_level[rhs];
|
||||
});
|
||||
|
||||
std::vector<std::uint32_t> permutation(ordering.size());
|
||||
for (auto index : util::irange<std::uint32_t>(0, ordering.size()))
|
||||
permutation[ordering[index]] = index;
|
||||
|
||||
return permutation;
|
||||
}
|
||||
}
|
||||
}
|
@ -142,26 +142,6 @@ void checkWeightsConsistency(
|
||||
}
|
||||
#endif
|
||||
|
||||
auto mmapFile(const std::string &filename, boost::interprocess::mode_t mode)
|
||||
{
|
||||
using boost::interprocess::file_mapping;
|
||||
using boost::interprocess::mapped_region;
|
||||
|
||||
try
|
||||
{
|
||||
const file_mapping mapping{filename.c_str(), mode};
|
||||
|
||||
mapped_region region{mapping, mode};
|
||||
region.advise(mapped_region::advice_sequential);
|
||||
return region;
|
||||
}
|
||||
catch (const std::exception &e)
|
||||
{
|
||||
util::Log(logERROR) << "Error while trying to mmap " + filename + ": " + e.what();
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
tbb::concurrent_vector<GeometryID>
|
||||
updateSegmentData(const UpdaterConfig &config,
|
||||
const extractor::ProfileProperties &profile_properties,
|
||||
@ -420,14 +400,11 @@ updateTurnPenalties(const UpdaterConfig &config,
|
||||
extractor::PackedOSMIDs osm_node_ids)
|
||||
{
|
||||
const auto weight_multiplier = profile_properties.GetWeightMultiplier();
|
||||
const auto turn_index_region =
|
||||
mmapFile(config.turn_penalties_index_path, boost::interprocess::read_only);
|
||||
|
||||
// Mapped file pointer for turn indices
|
||||
const extractor::lookup::TurnIndexBlock *turn_index_blocks =
|
||||
reinterpret_cast<const extractor::lookup::TurnIndexBlock *>(
|
||||
turn_index_region.get_address());
|
||||
BOOST_ASSERT(is_aligned<extractor::lookup::TurnIndexBlock>(turn_index_blocks));
|
||||
boost::iostreams::mapped_file_source turn_index_region;
|
||||
auto turn_index_blocks = util::mmapFile<extractor::lookup::TurnIndexBlock>(
|
||||
config.turn_penalties_index_path, turn_index_region);
|
||||
|
||||
// Get the turn penalty and update to the new value if required
|
||||
std::vector<std::uint64_t> updated_turns;
|
||||
@ -508,13 +485,10 @@ updateConditionalTurns(const UpdaterConfig &config,
|
||||
extractor::PackedOSMIDs &osm_node_ids,
|
||||
Timezoner time_zone_handler)
|
||||
{
|
||||
const auto turn_index_region =
|
||||
mmapFile(config.turn_penalties_index_path, boost::interprocess::read_only);
|
||||
// Mapped file pointer for turn indices
|
||||
const extractor::lookup::TurnIndexBlock *turn_index_blocks =
|
||||
reinterpret_cast<const extractor::lookup::TurnIndexBlock *>(
|
||||
turn_index_region.get_address());
|
||||
BOOST_ASSERT(is_aligned<extractor::lookup::TurnIndexBlock>(turn_index_blocks));
|
||||
boost::iostreams::mapped_file_source turn_index_region;
|
||||
auto turn_index_blocks = util::mmapFile<extractor::lookup::TurnIndexBlock>(
|
||||
config.turn_penalties_index_path, turn_index_region);
|
||||
|
||||
std::vector<std::uint64_t> updated_turns;
|
||||
if (conditional_turns.size() == 0)
|
||||
@ -589,16 +563,9 @@ Updater::LoadAndUpdateEdgeExpandedGraph(std::vector<extractor::EdgeBasedEdge> &e
|
||||
std::vector<util::Coordinate> coordinates;
|
||||
extractor::PackedOSMIDs osm_node_ids;
|
||||
|
||||
{
|
||||
storage::io::FileReader reader(config.edge_based_graph_path,
|
||||
storage::io::FileReader::VerifyFingerprint);
|
||||
auto num_edges = reader.ReadElementCount64();
|
||||
edge_based_edge_list.resize(num_edges);
|
||||
max_edge_id = reader.ReadOne<EdgeID>();
|
||||
reader.ReadInto(edge_based_edge_list);
|
||||
|
||||
extractor::files::readNodes(config.node_based_nodes_data_path, coordinates, osm_node_ids);
|
||||
}
|
||||
extractor::files::readEdgeBasedGraph(
|
||||
config.edge_based_graph_path, max_edge_id, edge_based_edge_list);
|
||||
extractor::files::readNodes(config.node_based_nodes_data_path, coordinates, osm_node_ids);
|
||||
|
||||
const bool update_conditional_turns =
|
||||
!config.turn_restrictions_path.empty() && config.valid_now;
|
||||
|
12
src/util/timed_historgram.cpp
Normal file
@ -0,0 +1,12 @@
|
||||
#include "util/timed_histogram.hpp"
|
||||
|
||||
namespace osrm
|
||||
{
|
||||
namespace util
|
||||
{
|
||||
namespace detail
|
||||
{
|
||||
std::atomic_uint operation = {0};
|
||||
}
|
||||
}
|
||||
}
|
86
unit_tests/partition/renumber.cpp
Normal file
@ -0,0 +1,86 @@
|
||||
#include <boost/numeric/conversion/cast.hpp>
|
||||
#include <boost/test/unit_test.hpp>
|
||||
|
||||
#include "partition/renumber.hpp"
|
||||
|
||||
#include "../common/range_tools.hpp"
|
||||
|
||||
using namespace osrm;
|
||||
using namespace osrm::partition;
|
||||
|
||||
namespace
|
||||
{
|
||||
struct MockEdge
|
||||
{
|
||||
NodeID start;
|
||||
NodeID target;
|
||||
};
|
||||
|
||||
auto makeGraph(const std::vector<MockEdge> &mock_edges)
|
||||
{
|
||||
struct EdgeData
|
||||
{
|
||||
EdgeWeight weight;
|
||||
bool forward;
|
||||
bool backward;
|
||||
};
|
||||
using InputEdge = DynamicEdgeBasedGraph::InputEdge;
|
||||
std::vector<InputEdge> edges;
|
||||
std::size_t max_id = 0;
|
||||
for (const auto &m : mock_edges)
|
||||
{
|
||||
max_id = std::max<std::size_t>(max_id, std::max(m.start, m.target));
|
||||
|
||||
edges.push_back(InputEdge{
|
||||
m.start, m.target, EdgeBasedGraphEdgeData{SPECIAL_NODEID, 1, 1, true, false}});
|
||||
edges.push_back(InputEdge{
|
||||
m.target, m.start, EdgeBasedGraphEdgeData{SPECIAL_NODEID, 1, 1, false, true}});
|
||||
}
|
||||
std::sort(edges.begin(), edges.end());
|
||||
return DynamicEdgeBasedGraph(max_id + 1, edges);
|
||||
}
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_SUITE(renumber_tests)
|
||||
|
||||
BOOST_AUTO_TEST_CASE(unsplitable_case)
|
||||
{
|
||||
// node: 0 1 2 3 4 5 6 7 8 9 10 11
|
||||
// border: x x x x x x x
|
||||
// permutation by cells: 0 1 2 5 6 10 11 7 8 9 3 4
|
||||
// order by cell: 0 1 2 10 11 3 4 7 8 9 5 6
|
||||
// x x x x x x x
|
||||
// border level: 3 3 3 2 2 1 1 0 0 0 0 0
|
||||
// order: 0 10 11 7 6 3 4 1 2 8 9 5
|
||||
// permutation: 0 7 8 5 6 11 4 3 9 10 1 2
|
||||
std::vector<CellID> l1{{0, 0, 1, 2, 3, 5, 5, 3, 4, 4, 1, 2}};
|
||||
std::vector<CellID> l2{{0, 0, 0, 1, 1, 3, 3, 1, 2, 2, 0, 1}};
|
||||
std::vector<CellID> l3{{0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1}};
|
||||
std::vector<CellID> l4{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}};
|
||||
|
||||
std::vector<MockEdge> edges = {
|
||||
// edges sorted into border/internal by level
|
||||
// level: (1) (2) (3) (4)
|
||||
{0, 1}, // i i i i
|
||||
{2, 10}, // i i i i
|
||||
{10, 7}, // b b b i
|
||||
{11, 0}, // b b b i
|
||||
{11, 3}, // i i i i
|
||||
{3, 4}, // b i i i
|
||||
{4, 11}, // b i i i
|
||||
{4, 7}, // i i i i
|
||||
{7, 6}, // b b i i
|
||||
{8, 9}, // i i i i
|
||||
{9, 8}, // i i i i
|
||||
{5, 6}, // i i i i
|
||||
{6, 5} // i i i i
|
||||
};
|
||||
|
||||
auto graph = makeGraph(edges);
|
||||
std::vector<Partition> partitions{l1, l2, l3, l4};
|
||||
|
||||
auto permutation = makePermutation(graph, partitions);
|
||||
CHECK_EQUAL_RANGE(permutation, 0, 7, 8, 5, 6, 11, 4, 3, 9, 10, 1, 2);
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_SUITE_END()
|
@ -64,4 +64,60 @@ BOOST_AUTO_TEST_CASE(find_test)
|
||||
BOOST_CHECK_EQUAL(simple_graph.GetEdgeData(eit).id, 2);
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(renumber_test)
|
||||
{
|
||||
/*
|
||||
* (0) -1-> (1)
|
||||
* ^ ^
|
||||
* 2 5
|
||||
* | |
|
||||
* (3) -3-> (4)
|
||||
* <-4-
|
||||
*/
|
||||
std::vector<TestInputEdge> input_edges = {TestInputEdge{0, 1, TestData{1}},
|
||||
TestInputEdge{3, 0, TestData{2}},
|
||||
TestInputEdge{3, 0, TestData{5}},
|
||||
TestInputEdge{3, 4, TestData{3}},
|
||||
TestInputEdge{4, 3, TestData{4}}};
|
||||
TestDynamicGraph simple_graph(5, input_edges);
|
||||
|
||||
/*
|
||||
* (1) -1-> (3)
|
||||
* ^ ^
|
||||
* 2 5
|
||||
* | |
|
||||
* (0) -3-> (2)
|
||||
* <-4-
|
||||
*/
|
||||
simple_graph.Renumber({1, 3, 4, 0, 2});
|
||||
|
||||
auto eit = simple_graph.FindEdge(1, 3);
|
||||
BOOST_CHECK(eit != SPECIAL_EDGEID);
|
||||
BOOST_CHECK_EQUAL(simple_graph.GetEdgeData(eit).id, 1);
|
||||
|
||||
eit = simple_graph.FindEdge(3, 1);
|
||||
BOOST_CHECK_EQUAL(eit, SPECIAL_EDGEID);
|
||||
|
||||
eit = simple_graph.FindEdgeInEitherDirection(3, 1);
|
||||
BOOST_CHECK_EQUAL(simple_graph.GetEdgeData(eit).id, 1);
|
||||
|
||||
bool reverse = false;
|
||||
eit = simple_graph.FindEdgeIndicateIfReverse(3, 1, reverse);
|
||||
BOOST_CHECK_EQUAL(simple_graph.GetEdgeData(eit).id, 1);
|
||||
BOOST_CHECK(reverse);
|
||||
|
||||
eit = simple_graph.FindEdge(0, 3);
|
||||
BOOST_CHECK_EQUAL(eit, SPECIAL_EDGEID);
|
||||
eit = simple_graph.FindEdge(1, 2);
|
||||
BOOST_CHECK_EQUAL(eit, SPECIAL_EDGEID);
|
||||
|
||||
eit = simple_graph.FindEdge(0, 2);
|
||||
BOOST_CHECK_EQUAL(simple_graph.GetEdgeData(eit).id, 3);
|
||||
eit = simple_graph.FindEdgeInEitherDirection(0, 2);
|
||||
BOOST_CHECK_EQUAL(simple_graph.GetEdgeData(eit).id, 3);
|
||||
|
||||
eit = simple_graph.FindEdge(0, 1);
|
||||
BOOST_CHECK_EQUAL(simple_graph.GetEdgeData(eit).id, 2);
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_SUITE_END()
|
||||
|
30
unit_tests/util/permutation.cpp
Normal file
@ -0,0 +1,30 @@
|
||||
#include "../common/range_tools.hpp"
|
||||
|
||||
#include "util/permutation.hpp"
|
||||
|
||||
#include <boost/test/test_case_template.hpp>
|
||||
#include <boost/test/unit_test.hpp>
|
||||
|
||||
#include <algorithm>
|
||||
#include <numeric>
|
||||
#include <random>
|
||||
|
||||
BOOST_AUTO_TEST_SUITE(permutation_test)
|
||||
|
||||
using namespace osrm;
|
||||
using namespace osrm::util;
|
||||
|
||||
BOOST_AUTO_TEST_CASE(basic_permuation)
|
||||
{
|
||||
// cycles (0 3 2 1 4 8) (5) (6 9 7)
|
||||
// 0 1 2 3 4 5 6 7 8 9
|
||||
std::vector<std::uint32_t> values{1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
|
||||
const std::vector<std::uint32_t> permutation{3, 4, 1, 2, 8, 5, 9, 6, 0, 7};
|
||||
std::vector<std::uint32_t> reference{9, 3, 4, 1, 2, 6, 8, 10, 5, 7};
|
||||
|
||||
inplacePermutation(values.begin(), values.end(), permutation);
|
||||
|
||||
CHECK_EQUAL_COLLECTIONS(values, reference);
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_SUITE_END()
|
@ -150,4 +150,59 @@ BOOST_AUTO_TEST_CASE(find_test)
|
||||
BOOST_CHECK_EQUAL(simple_graph.GetEdgeData(eit).id, 2);
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(renumber_test)
|
||||
{
|
||||
/*
|
||||
* (0) -1-> (1)
|
||||
* ^ ^
|
||||
* 2 5
|
||||
* | |
|
||||
* (3) -3-> (4)
|
||||
* <-4-
|
||||
*/
|
||||
std::vector<TestInputEdge> input_edges = {TestInputEdge{0, 1, EdgeID{1}},
|
||||
TestInputEdge{3, 0, EdgeID{2}},
|
||||
TestInputEdge{3, 0, EdgeID{5}},
|
||||
TestInputEdge{3, 4, EdgeID{3}},
|
||||
TestInputEdge{4, 3, EdgeID{4}}};
|
||||
TestStaticGraph simple_graph(5, input_edges);
|
||||
/*
|
||||
* (1) -1-> (3)
|
||||
* ^ ^
|
||||
* 2 5
|
||||
* | |
|
||||
* (0) -3-> (2)
|
||||
* <-4-
|
||||
*/
|
||||
simple_graph.Renumber({1, 3, 4, 0, 2});
|
||||
|
||||
auto eit = simple_graph.FindEdge(1, 3);
|
||||
BOOST_CHECK(eit != SPECIAL_EDGEID);
|
||||
BOOST_CHECK_EQUAL(simple_graph.GetEdgeData(eit).id, 1);
|
||||
|
||||
eit = simple_graph.FindEdge(3, 1);
|
||||
BOOST_CHECK_EQUAL(eit, SPECIAL_EDGEID);
|
||||
|
||||
eit = simple_graph.FindEdgeInEitherDirection(3, 1);
|
||||
BOOST_CHECK_EQUAL(simple_graph.GetEdgeData(eit).id, 1);
|
||||
|
||||
bool reverse = false;
|
||||
eit = simple_graph.FindEdgeIndicateIfReverse(3, 1, reverse);
|
||||
BOOST_CHECK_EQUAL(simple_graph.GetEdgeData(eit).id, 1);
|
||||
BOOST_CHECK(reverse);
|
||||
|
||||
eit = simple_graph.FindEdge(0, 3);
|
||||
BOOST_CHECK_EQUAL(eit, SPECIAL_EDGEID);
|
||||
eit = simple_graph.FindEdge(1, 2);
|
||||
BOOST_CHECK_EQUAL(eit, SPECIAL_EDGEID);
|
||||
|
||||
eit = simple_graph.FindEdge(0, 2);
|
||||
BOOST_CHECK_EQUAL(simple_graph.GetEdgeData(eit).id, 3);
|
||||
eit = simple_graph.FindEdgeInEitherDirection(0, 2);
|
||||
BOOST_CHECK_EQUAL(simple_graph.GetEdgeData(eit).id, 3);
|
||||
|
||||
eit = simple_graph.FindEdge(0, 1);
|
||||
BOOST_CHECK_EQUAL(simple_graph.GetEdgeData(eit).id, 2);
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_SUITE_END()