always emit a small component view

Unit Tests for Reordering by Predicate
This commit is contained in:
Moritz Kobitzsch 2017-02-01 15:50:06 +01:00 committed by Patrick Niklaus
parent b9ed20bb9b
commit c3cc79f798
17 changed files with 323 additions and 195 deletions

View File

@ -19,8 +19,9 @@ namespace osrm
namespace partition
{
// Graph node and its corresponding coordinate.
// The coordinate will be used in the partitioning step.
// Node in the bisection graph. We require the original node id (since we remap the nodes all the
// time and can track the correct ID this way). In addition, the node provides the coordinate it is
// located at, for use in the inertial flow sorting by slope.
struct BisectionNode
{
BisectionNode(util::Coordinate coordinate_ = {util::FloatLongitude{0}, util::FloatLatitude{0}},
@ -36,7 +37,12 @@ struct BisectionNode
NodeID original_id;
};
// Graph edge and data for Max-Flow Min-Cut augmentation.
// For max-flow/min-cut computations, we operate on an undirected graph. This has some benefits:
// - we don't disconnect the graph more than we have to
// - small components will actually be disconnected (no border nodes)
// - parts of the graph that are connected in one way (not reachable/not exitable) will remain
// close to their connected nodes
// As a result, the target is the only data member we require in the edge.
struct BisectionEdge
{
BisectionEdge(const NodeID target_ = SPECIAL_NODEID) : target(target_) {}
@ -44,11 +50,13 @@ struct BisectionEdge
NodeID target;
};
// The graph layout we use as a basis for partitioning.
using RemappableGraphNode = NodeEntryWrapper<BisectionNode>;
// Aliases for the graph used during the bisection, based on the Remappable graph
using BisectionGraphNode = NodeEntryWrapper<BisectionNode>;
using BisectionInputEdge = GraphConstructionWrapper<BisectionEdge>;
using BisectionGraph = RemappableGraph<RemappableGraphNode, BisectionEdge>;
using BisectionGraph = RemappableGraph<BisectionGraphNode, BisectionEdge>;
// Factory method to construct the bisection graph from a set of coordinates and input edges (which
// need to contain source and target)
inline BisectionGraph makeBisectionGraph(const std::vector<util::Coordinate> &coordinates,
const std::vector<BisectionInputEdge> &edges)
{
@ -69,23 +77,25 @@ inline BisectionGraph makeBisectionGraph(const std::vector<util::Coordinate> &co
};
// create a bisection node, requires the ID of the node as well as the lower bound to its edges
const auto make_bisection_node = [&edges, &coordinates](const std::size_t node_id,
const auto edge_itr) {
std::size_t range_begin = std::distance(edges.begin(), edge_itr);
return BisectionGraph::NodeT(range_begin, range_begin, coordinates[node_id], node_id);
const auto make_bisection_node = [&edges, &coordinates](
const std::size_t node_id, const auto begin_itr, const auto end_itr) {
std::size_t range_begin = std::distance(edges.begin(), begin_itr);
std::size_t range_end = std::distance(edges.begin(), end_itr);
return BisectionGraph::NodeT(range_begin, range_end, coordinates[node_id], node_id);
};
auto edge_itr = edges.begin();
for (std::size_t node_id = 0; node_id < coordinates.size(); ++node_id)
{
result_nodes.emplace_back(make_bisection_node(node_id, edge_itr));
const auto begin_itr = edge_itr;
edge_itr = advance_edge_itr(node_id, edge_itr);
result_nodes.back().edges_end = std::distance(edges.begin(), edge_itr);
result_nodes.emplace_back(make_bisection_node(node_id, begin_itr, edge_itr));
}
return BisectionGraph(std::move(result_nodes), std::move(result_edges));
}
// Reduce any edge to a fitting input edge for the bisection graph
template <typename InputEdge>
std::vector<BisectionInputEdge> adaptToBisectionEdge(std::vector<InputEdge> edges)
{

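The factory above stores each node's edges as an [edges_begin, edges_end) offset range into one shared edge array. Purely as an illustration of that layout (names here are made up, not OSRM code), a standalone sketch of the offset computation, assuming the edges are already grouped by source:

#include <cstddef>
#include <utility>
#include <vector>

struct SketchNode
{
    std::size_t edges_begin;
    std::size_t edges_end;
};

// edges are (source, target) pairs, already grouped by source node id
std::vector<SketchNode>
buildOffsetRanges(const std::vector<std::pair<std::size_t, std::size_t>> &edges,
                  const std::size_t num_nodes)
{
    std::vector<SketchNode> nodes(num_nodes);
    std::size_t edge = 0;
    for (std::size_t node = 0; node < num_nodes; ++node)
    {
        nodes[node].edges_begin = edge;
        // advance past all edges leaving this node
        while (edge < edges.size() && edges[edge].first == node)
            ++edge;
        nodes[node].edges_end = edge;
    }
    return nodes;
}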
View File

@ -6,23 +6,10 @@
#include <cstdint>
#include <functional>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
namespace std
{
template <> struct hash<std::pair<NodeID, NodeID>>
{
std::size_t operator()(const std::pair<NodeID, NodeID> &flow_edge) const
{
std::size_t combined = (static_cast<std::size_t>(flow_edge.first) << 32) | flow_edge.second;
return std::hash<std::size_t>()(combined);
}
};
}
namespace osrm
{
namespace partition
@ -31,42 +18,68 @@ namespace partition
class DinicMaxFlow
{
public:
// maximal number of hops in the graph from source to sink
using Level = std::uint32_t;
using MinCut = struct
{
std::size_t num_nodes_source;
std::size_t num_edges;
std::vector<bool> flags;
};
// input parameter storing the set of source/sink nodes
using SourceSinkNodes = std::unordered_set<NodeID>;
using LevelGraph = std::vector<Level>;
using FlowEdges = std::vector<std::set<NodeID>>;
MinCut operator()(const GraphView &view,
const SourceSinkNodes &sink_nodes,
const SourceSinkNodes &source_nodes) const;
private:
// the level of each node in the graph (==hops in BFS from source)
using LevelGraph = std::vector<Level>;
// This is actually faster than using an unordered_set<Edge>; it stores all edges that have
// capacity, grouped by node.
using FlowEdges = std::vector<std::set<NodeID>>;
// The level graph (see [1]) is based on a BFS computation. We assign a level to all nodes
// (starting with 0 for all source nodes), using the hop distance in the residual graph as the
// level of the node.
// a
// / \
// s t
// \ /
// b
// would assign s = 0, a = b = 1, t = 2
LevelGraph ComputeLevelGraph(const GraphView &view,
const std::vector<NodeID> &border_source_nodes,
const SourceSinkNodes &source_nodes,
const SourceSinkNodes &sink_nodes,
const FlowEdges &flow) const;
std::uint32_t BlockingFlow(FlowEdges &flow,
LevelGraph &levels,
const GraphView &view,
const SourceSinkNodes &source_nodes,
const std::vector<NodeID> &border_sink_nodes) const;
// Using the above levels (see ComputeLevelGraph), we can use multiple DFS runs (that can now be
// directed at the sink) to find a flow that completely blocks the level graph (i.e. no path
// with increasing level exists from `s` to `t`).
std::size_t BlockingFlow(FlowEdges &flow,
LevelGraph &levels,
const GraphView &view,
const SourceSinkNodes &source_nodes,
const std::vector<NodeID> &border_sink_nodes) const;
// Finds a single augmenting path from a node to the sink side following levels in the level
// graph. We don't actually remove the edges, so we have to check for increasing level values.
// Since we know which sinks have been reached, we actually search for these paths starting at
// sink nodes, instead of the source, so we can save a few DFS runs.
std::vector<NodeID> GetAugmentingPath(LevelGraph &levels,
const NodeID from,
const GraphView &view,
const FlowEdges &flow,
const SourceSinkNodes &sink_nodes) const;
const SourceSinkNodes &source_nodes) const;
// Builds an actual cut result from a level graph
MinCut MakeCut(const GraphView &view, const LevelGraph &levels) const;
MinCut
MakeCut(const GraphView &view, const LevelGraph &levels, const std::size_t flow_value) const;
};
} // namespace partition

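As a standalone illustration of the level assignment described in the comments above (this is a sketch, not the OSRM implementation): a multi-source BFS that records each node's hop distance as its level; on the diamond example it yields s = 0, a = b = 1, t = 2.

#include <cstddef>
#include <cstdint>
#include <limits>
#include <queue>
#include <vector>

using SketchLevel = std::uint32_t;
constexpr SketchLevel SKETCH_INVALID_LEVEL = std::numeric_limits<SketchLevel>::max();

// adjacency[u] lists the neighbors of node u; sources get level 0, all other reachable
// nodes get their hop distance, unreachable nodes keep SKETCH_INVALID_LEVEL
std::vector<SketchLevel> computeLevels(const std::vector<std::vector<std::size_t>> &adjacency,
                                       const std::vector<std::size_t> &sources)
{
    std::vector<SketchLevel> levels(adjacency.size(), SKETCH_INVALID_LEVEL);
    std::queue<std::size_t> bfs_queue;
    for (const auto source : sources)
    {
        levels[source] = 0;
        bfs_queue.push(source);
    }
    while (!bfs_queue.empty())
    {
        const auto node = bfs_queue.front();
        bfs_queue.pop();
        for (const auto target : adjacency[node])
        {
            if (levels[target] == SKETCH_INVALID_LEVEL)
            {
                levels[target] = levels[node] + 1;
                bfs_queue.push(target);
            }
        }
    }
    return levels;
}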
View File

@ -6,6 +6,7 @@
#include <boost/iterator/filter_iterator.hpp>
#include <boost/iterator/iterator_facade.hpp>
#include <boost/range/iterator_range.hpp>
#include <cstddef>
#include <cstdint>
@ -20,32 +21,38 @@ namespace partition
class GraphView
{
public:
GraphView(const BisectionGraph &graph,
const BisectionGraph::ConstNodeIterator begin,
const BisectionGraph::ConstNodeIterator end);
using ConstNodeIterator = BisectionGraph::ConstNodeIterator;
using NodeIterator = BisectionGraph::NodeIterator;
using NodeT = BisectionGraph::NodeT;
using EdgeT = BisectionGraph::EdgeT;
// Construction either for a subrange, or for a full range
GraphView(const BisectionGraph &graph);
GraphView(const BisectionGraph &graph,
const ConstNodeIterator begin,
const ConstNodeIterator end);
// construction from a different view, no need to keep the graph around
GraphView(const GraphView &view, const ConstNodeIterator begin, const ConstNodeIterator end);
// Number of nodes in this sub-graph.
std::size_t NumberOfNodes() const;
BisectionGraph::ConstNodeIterator Begin() const;
BisectionGraph::ConstNodeIterator End() const;
// Iteration over all nodes (direct access into the node)
ConstNodeIterator Begin() const;
ConstNodeIterator End() const;
const BisectionNode &GetNode(const NodeID nid) const;
const BisectionEdge &GetEdge(const EdgeID eid) const;
// Re-Construct the ID of a node from a reference
NodeID GetID(const NodeT &node) const;
NodeID GetID(const BisectionGraph::NodeT &node) const;
// Access into single nodes/Edges
const NodeT &Node(const NodeID nid) const;
const EdgeT &Edge(const EdgeID eid) const;
inline auto Edges(const NodeID nid) const { return bisection_graph.Edges(*(begin + nid)); }
inline auto BeginEdges(const NodeID nid) const
{
return bisection_graph.BeginEdges(*(begin + nid));
}
inline auto EndEdges(const NodeID nid) const
{
return bisection_graph.EndEdges(*(begin + nid));
}
// Access into all Edges
auto Edges(const NodeID nid) const { return bisection_graph.Edges(*(begin + nid)); }
auto BeginEdges(const NodeID nid) const { return bisection_graph.BeginEdges(*(begin + nid)); }
auto EndEdges(const NodeID nid) const { return bisection_graph.EndEdges(*(begin + nid)); }
private:
const BisectionGraph &bisection_graph;

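A brief usage sketch of the constructors declared above; `view` and the split point `center` are hypothetical, e.g. a ConstNodeIterator produced by the bisection step:

// `view` is an existing GraphView; `center` is a ConstNodeIterator in [view.Begin(), view.End()]
GraphView left(view, view.Begin(), center);
GraphView right(view, center, view.End());
// the two halves cover the original range without copying the underlying graph:
// left.NumberOfNodes() + right.NumberOfNodes() == view.NumberOfNodes()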
View File

@ -15,6 +15,9 @@ namespace osrm
namespace partition
{
// forward declaration to allow finding friends
template <typename NodeEntryT, typename EdgeEntryT> class RemappableGraph;
// wrapper for nodes to augment with a tag storing first edge id
template <typename Base> class NodeEntryWrapper : public Base
{
@ -25,10 +28,17 @@ template <typename Base> class NodeEntryWrapper : public Base
{
}
private:
// only to be modified by the graph itself
std::size_t edges_begin;
std::size_t edges_end;
// give the graph access to the node data wrapper
template <typename NodeEntryT, typename EdgeEntryT> friend class RemappableGraph;
};
using RemappableGraphNode = NodeEntryWrapper<struct zero_base_class>;
template <typename Base> class GraphConstructionWrapper : public Base
{
public:
@ -106,10 +116,19 @@ template <typename NodeEntryT, typename EdgeEntryT> class RemappableGraph
auto BeginEdges(const NodeT &node) { return edges.begin() + node.edges_begin; }
auto EndEdges(const NodeT &node) { return edges.begin() + node.edges_end; }
EdgeID BeginEdgeID(const NodeID nid) const { return nodes[nid].edges_begin; }
EdgeID EndEdgeID(const NodeID nid) const { return nodes[nid].edges_end; }
// iterate over all nodes
auto Nodes() { return boost::make_iterator_range(nodes.begin(), nodes.end()); }
auto Nodes() const { return boost::make_iterator_range(nodes.begin(), nodes.end()); }
NodeID GetID(const NodeT &node)
{
BOOST_ASSERT(&node >= &nodes[0] && &node <= &nodes.back());
return (&node - &nodes[0]);
}
NodeIterator Begin() { return nodes.begin(); }
NodeIterator End() { return nodes.end(); }
ConstNodeIterator CBegin() const { return nodes.cbegin(); }

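GetID above recovers a node's index from a reference via pointer arithmetic against the backing vector. A minimal standalone illustration of the same idiom (illustrative names, not OSRM code):

#include <cassert>
#include <cstddef>
#include <vector>

// returns the index of `element`, which must be a reference into `container`
std::size_t indexOf(const std::vector<int> &container, const int &element)
{
    assert(&element >= container.data() && &element < container.data() + container.size());
    return static_cast<std::size_t>(&element - container.data());
}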
View File

@ -22,6 +22,8 @@ class RecursiveBisection
std::size_t num_optimizing_cuts,
BisectionGraph &bisection_graph);
const std::vector<RecursiveBisectionState::BisectionID> &BisectionIDs() const;
private:
BisectionGraph &bisection_graph;
RecursiveBisectionState internal_state;

View File

@ -77,6 +77,8 @@ class RecursiveBisectionState
const std::size_t depth,
const std::vector<bool> &partition);
const std::vector<BisectionID> &BisectionIDs() const;
private:
BisectionGraph &bisection_graph;
std::vector<BisectionID> bisection_ids;

View File

@ -743,15 +743,8 @@ void Extractor::WriteCompressedNodeBasedGraph(const std::string &path,
BOOST_ASSERT_MSG(num_nodes == externals.size(), "graph and embedding out of sync");
const auto die = [] {
throw util::exception("Writing the compressed node based graph to disk failed");
};
if (!writer.WriteElementCount64(num_edges))
die();
if (!writer.WriteElementCount64(num_nodes))
die();
writer.WriteElementCount64(num_edges);
writer.WriteElementCount64(num_nodes);
// For each node, iterate over its edges and dump (from, to) pairs
for (const NodeID from_node : util::irange(0u, num_nodes))
@ -760,19 +753,15 @@ void Extractor::WriteCompressedNodeBasedGraph(const std::string &path,
{
const auto to_node = graph.GetTarget(edge);
if (!writer.WriteOne(from_node))
die();
if (!writer.WriteOne(to_node))
die();
writer.WriteOne(from_node);
writer.WriteOne(to_node);
}
}
for (const auto &qnode : externals)
{
if (!writer.WriteOne(qnode.lon))
die();
if (!writer.WriteOne(qnode.lat))
die();
writer.WriteOne(qnode.lon);
writer.WriteOne(qnode.lat);
}
}

View File

@ -1,7 +1,9 @@
#include "partition/dinic_max_flow.hpp"
#include "util/integer_range.hpp"
#include <algorithm>
#include <limits>
#include <numeric>
#include <queue>
#include <set>
#include <stack>
@ -13,43 +15,46 @@ namespace partition
namespace
{
const auto constexpr INVALID_LEVEL = std::numeric_limits<DinicMaxFlow::Level>::max();
auto makeHasNeighborNotInCheck(const DinicMaxFlow::SourceSinkNodes &set, const GraphView &view)
{
return [&](const NodeID nid) {
const auto is_not_contained = [&set](const BisectionEdge &edge) {
return set.count(edge.target) == 0;
};
return view.EndEdges(nid) !=
std::find_if(view.BeginEdges(nid), view.EndEdges(nid), is_not_contained);
};
}
} // end namespace
DinicMaxFlow::MinCut DinicMaxFlow::operator()(const GraphView &view,
const SourceSinkNodes &source_nodes,
const SourceSinkNodes &sink_nodes) const
{
// for the inertial flow algorithm, we use quite a large set of nodes as source/sink nodes. Only
// a few of them can be part of the process, since they are grouped together. A standard
// parameterisation would be 25% sink/source nodes. This already includes 50% of the graph. By
// only focussing on a small set on the outside of the source/sink blob, we can save quite some
// overhead in initialisation/search cost.
std::vector<NodeID> border_source_nodes;
border_source_nodes.reserve(0.01 * source_nodes.size());
for (auto node : source_nodes)
{
for (const auto &edge : view.Edges(node))
{
const auto target = edge.target;
if (0 == source_nodes.count(target))
{
border_source_nodes.push_back(node);
break;
}
}
}
std::copy_if(source_nodes.begin(),
source_nodes.end(),
std::back_inserter(border_source_nodes),
makeHasNeighborNotInCheck(source_nodes, view));
std::vector<NodeID> border_sink_nodes;
border_sink_nodes.reserve(0.01 * sink_nodes.size());
for (auto node : sink_nodes)
{
for (const auto &edge : view.Edges(node))
{
const auto target = edge.target;
if (0 == sink_nodes.count(target))
{
border_sink_nodes.push_back(node);
break;
}
}
}
std::copy_if(sink_nodes.begin(),
sink_nodes.end(),
std::back_inserter(border_sink_nodes),
makeHasNeighborNotInCheck(sink_nodes, view));
// edges in current flow that have capacity
// The graph (V,E) contains undirected edges for all (u,v) \in V x V. We describe the flow as a
@ -57,13 +62,14 @@ DinicMaxFlow::MinCut DinicMaxFlow::operator()(const GraphView &view,
// from `t` to `s`, we can remove `(s,t)` from the flow, if we send flow back the first time,
// and insert `(t,s)` only if we send flow again.
// allocate storage for the flow
FlowEdges flow(view.NumberOfNodes());
std::size_t flow_value = 0;
do
{
std::cout << "." << std::flush;
auto levels = ComputeLevelGraph(view, border_source_nodes, source_nodes, sink_nodes, flow);
// check if the sink can be reached from the source
// check if the sink can be reached from the source; it's enough to check the border
const auto separated = std::find_if(border_sink_nodes.begin(),
border_sink_nodes.end(),
[&levels, &view](const auto node) {
@ -72,51 +78,36 @@ DinicMaxFlow::MinCut DinicMaxFlow::operator()(const GraphView &view,
if (!separated)
{
BlockingFlow(flow, levels, view, source_nodes, border_sink_nodes);
flow_value += BlockingFlow(flow, levels, view, source_nodes, border_sink_nodes);
}
else
{
// mark levels for all sources to not confuse make-cut
// mark levels for all sources to not confuse make-cut (due to the border nodes
// heuristic)
for (auto s : source_nodes)
levels[s] = 0;
return MakeCut(view, levels);
const auto cut = MakeCut(view, levels, flow_value);
return cut;
}
} while (true);
}
DinicMaxFlow::MinCut DinicMaxFlow::MakeCut(const GraphView &view, const LevelGraph &levels) const
DinicMaxFlow::MinCut DinicMaxFlow::MakeCut(const GraphView &view,
const LevelGraph &levels,
const std::size_t flow_value) const
{
const auto is_sink_side = [&view, &levels](const NodeID nid) {
return levels[nid] == INVALID_LEVEL;
};
const auto is_valid_level = [](const Level level) { return level != INVALID_LEVEL; };
// all elements within `levels` are on the source side
// This part should opt to find the most balanced cut, which is not necessarily the case right
// now
std::vector<bool> result(view.NumberOfNodes(), true);
std::size_t source_side_count = view.NumberOfNodes();
for (auto itr = view.Begin(); itr != view.End(); ++itr)
{
if (is_sink_side(std::distance(view.Begin(), itr)))
{
result[std::distance(view.Begin(), itr)] = false;
--source_side_count;
}
}
std::size_t num_edges = 0;
for (auto itr = view.Begin(); itr != view.End(); ++itr)
{
const auto nid = std::distance(view.Begin(), itr);
const auto sink_side = is_sink_side(nid);
for (const auto &edge : view.Edges(nid))
{
if (is_sink_side(edge.target) != sink_side)
{
++num_edges;
}
}
}
return {source_side_count, num_edges, std::move(result)};
// now. There is potential for optimisation here.
std::vector<bool> result(view.NumberOfNodes());
BOOST_ASSERT(view.NumberOfNodes() == levels.size());
std::size_t source_side_count = std::count_if(levels.begin(), levels.end(), is_valid_level);
std::transform(levels.begin(), levels.end(), result.begin(), is_valid_level);
return {source_side_count, flow_value, std::move(result)};
}
DinicMaxFlow::LevelGraph
@ -129,18 +120,16 @@ DinicMaxFlow::ComputeLevelGraph(const GraphView &view,
LevelGraph levels(view.NumberOfNodes(), INVALID_LEVEL);
std::queue<NodeID> level_queue;
// set the levels of the border source nodes to zero and add them to the BFS queue. In addition,
// set all their neighbors within the source set to zero as well (which allows us to use the levels
// directly to see what we visited, and still not go back into the huge set of sources)
for (const auto node_id : border_source_nodes)
{
levels[node_id] = 0;
level_queue.push(node_id);
for (const auto &edge : view.Edges(node_id))
{
const auto target = edge.target;
if (source_nodes.count(target))
{
levels[target] = 0;
}
}
if (source_nodes.count(edge.target))
levels[edge.target] = 0;
}
// check if there is flow present on an edge
const auto has_flow = [&](const NodeID from, const NodeID to) {
@ -148,11 +137,12 @@ DinicMaxFlow::ComputeLevelGraph(const GraphView &view,
};
// perform a relaxation step in the BFS algorithm
const auto relax_node = [&](const NodeID node_id, const Level level) {
const auto relax_node = [&](const NodeID node_id) {
// don't relax sink nodes
if (sink_nodes.count(node_id))
return;
const auto level = levels[node_id] + 1;
for (const auto &edge : view.Edges(node_id))
{
const auto target = edge.target;
@ -170,44 +160,38 @@ DinicMaxFlow::ComputeLevelGraph(const GraphView &view,
};
// compute the levels of the level graph using BFS
for (Level level = 1; !level_queue.empty(); ++level)
while (!level_queue.empty())
{
// run through the current level
auto steps = level_queue.size();
while (steps--)
{
relax_node(level_queue.front(), level);
level_queue.pop();
}
relax_node(level_queue.front());
level_queue.pop();
}
return levels;
}
std::uint32_t DinicMaxFlow::BlockingFlow(FlowEdges &flow,
LevelGraph &levels,
const GraphView &view,
const SourceSinkNodes &source_nodes,
const std::vector<NodeID> &border_sink_nodes) const
std::size_t DinicMaxFlow::BlockingFlow(FlowEdges &flow,
LevelGraph &levels,
const GraphView &view,
const SourceSinkNodes &source_nodes,
const std::vector<NodeID> &border_sink_nodes) const
{
std::uint32_t flow_increase = 0;
// track the number of augmenting paths; in sum they will equal the number of unique border
// edges (since our graph is undirected)
std::size_t flow_increase = 0;
// augment the flow along a path in the level graph
const auto augment_flow = [&flow, &view](const std::vector<NodeID> &path) {
const auto augment_one = [&flow, &view](const NodeID from, const NodeID to) {
// add/remove flow edges from the current residual graph
const auto augment_one = [&flow, &view](const NodeID from, const NodeID to) {
// check if there is flow in the opposite direction
auto existing_edge = flow[to].find(from);
if (existing_edge != flow[to].end())
{
// remove flow from reverse edges first
flow[to].erase(existing_edge);
}
flow[to].erase(existing_edge); // remove flow from reverse edges first
else
{
// only add flow if no opposite flow exists
flow[from].insert(to);
}
// for adjacent find
flow[from].insert(to); // only add flow if no opposite flow exists
// do augmentation on all pairs, never stop early:
return false;
};
@ -215,32 +199,31 @@ std::uint32_t DinicMaxFlow::BlockingFlow(FlowEdges &flow,
std::adjacent_find(path.begin(), path.end(), augment_one);
};
// find and augment the blocking flow
const auto augment_all_paths = [&](const NodeID sink_node_id) {
// only augment sinks
if (levels[sink_node_id] == INVALID_LEVEL)
return;
std::vector<std::pair<std::uint32_t, NodeID>> reached_sinks;
for (auto sink : border_sink_nodes)
{
if (levels[sink] != INVALID_LEVEL)
while (true)
{
reached_sinks.push_back(std::make_pair(levels[sink], sink));
// as long as there are augmenting paths from the sink, add them
const auto path = GetAugmentingPath(levels, sink_node_id, view, flow, source_nodes);
if (path.empty())
break;
else
{
augment_flow(path);
++flow_increase;
}
}
}
std::sort(reached_sinks.begin(), reached_sinks.end());
for (auto sink_itr = reached_sinks.begin(); sink_itr != reached_sinks.end();)
{
auto path = GetAugmentingPath(levels, sink_itr->second, view, flow, source_nodes);
if (!path.empty())
augment_flow(path);
else
++sink_itr;
}
};
std::for_each(border_sink_nodes.begin(), border_sink_nodes.end(), augment_all_paths);
return flow_increase;
}
// Performs a DFS in the level graph. By setting the levels of nodes that don't offer any further
// paths to INVALID_LEVEL and by following the level graph, this looks at every edge at most `c`
// times (O(E)).
std::vector<NodeID> DinicMaxFlow::GetAugmentingPath(LevelGraph &levels,
const NodeID node_id,
const GraphView &view,
@ -251,7 +234,7 @@ std::vector<NodeID> DinicMaxFlow::GetAugmentingPath(LevelGraph &levels,
BOOST_ASSERT(source_nodes.find(node_id) == source_nodes.end());
// Keeps the local state of the DFS in the form of iterators
using DFSState = struct
struct DFSState
{
BisectionGraph::ConstEdgeIterator edge_iterator;
const BisectionGraph::ConstEdgeIterator end_iterator;

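The augment_one lambda above implements unit-capacity flow on an undirected graph: pushing flow over (from, to) cancels any opposite flow first and only otherwise records a forward edge. A standalone sketch of that bookkeeping (illustrative, not OSRM code):

#include <cstddef>
#include <set>
#include <vector>

// one set of flow targets per node
using SketchFlowEdges = std::vector<std::set<std::size_t>>;

void pushUnitFlow(SketchFlowEdges &flow, const std::size_t from, const std::size_t to)
{
    const auto reverse = flow[to].find(from);
    if (reverse != flow[to].end())
        flow[to].erase(reverse); // a unit of reverse flow exists: cancel it instead of adding flow
    else
        flow[from].insert(to); // otherwise record a unit of flow on the forward edge
}

// true if a unit of flow is currently recorded on the directed edge (from, to)
bool hasUnitFlow(const SketchFlowEdges &flow, const std::size_t from, const std::size_t to)
{
    return flow[from].count(to) > 0;
}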
View File

@ -3,14 +3,15 @@
#include <iostream>
#include <iterator>
#include <boost/assert.hpp>
namespace osrm
{
namespace partition
{
GraphView::GraphView(const BisectionGraph &bisection_graph_)
: bisection_graph(bisection_graph_), begin(bisection_graph.CBegin()),
end(bisection_graph.CEnd())
: GraphView(bisection_graph_, bisection_graph_.CBegin(), bisection_graph_.CEnd())
{
}
@ -21,23 +22,32 @@ GraphView::GraphView(const BisectionGraph &bisection_graph_,
{
}
NodeID GraphView::GetID(const BisectionGraph::NodeT &node) const
GraphView::GraphView(const GraphView &other_view,
const BisectionGraph::ConstNodeIterator begin_,
const BisectionGraph::ConstNodeIterator end_)
: GraphView(other_view.bisection_graph, begin_, end_)
{
return static_cast<NodeID>(&node - &(*begin));
}
std::size_t GraphView::NumberOfNodes() const { return std::distance(begin, end); }
NodeID GraphView::GetID(const NodeT &node) const
{
const auto node_id = static_cast<NodeID>(&node - &(*begin));
BOOST_ASSERT(node_id < NumberOfNodes());
return node_id;
}
BisectionGraph::ConstNodeIterator GraphView::Begin() const { return begin; }
BisectionGraph::ConstNodeIterator GraphView::End() const { return end; }
std::size_t GraphView::NumberOfNodes() const { return std::distance(begin, end); }
const BisectionNode &GraphView::GetNode(const NodeID nid) const
const GraphView::NodeT &GraphView::Node(const NodeID nid) const
{
return bisection_graph.Node(nid);
}
const BisectionEdge &GraphView::GetEdge(const EdgeID eid) const
const GraphView::EdgeT &GraphView::Edge(const EdgeID eid) const
{
return bisection_graph.Edge(eid);
}

View File

@ -118,14 +118,9 @@ DinicMaxFlow::MinCut InertialFlow::BestMinCut(const std::size_t n, const double
// Swap to keep the destruction of the old object outside of the critical section.
if (std::tie(cut.num_edges, cut_balance) < std::tie(best.num_edges, best_balance))
{
std::cout << "New Cut: " << cut.num_edges << " " << cut_balance << std::endl;
best_balance = cut_balance;
std::swap(best, cut);
}
else
{
std::cout << "Bad Cut: " << cut.num_edges << " " << cut_balance << std::endl;
}
}
// cut gets destroyed here
}

View File

@ -9,13 +9,16 @@
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>
#include <boost/assert.hpp>
#include <tbb/task_scheduler_init.h>
#include "util/geojson_debug_logger.hpp"
#include "util/geojson_debug_policies.hpp"
#include "util/json_container.hpp"
namespace osrm
{
namespace partition
@ -137,6 +140,9 @@ int Partitioner::Run(const PartitionConfig &config)
<< compressed_node_based_graph.edges.size() << " edges, "
<< compressed_node_based_graph.coordinates.size() << " nodes";
groupEdgesBySource(begin(compressed_node_based_graph.edges),
end(compressed_node_based_graph.edges));
auto graph =
makeBisectionGraph(compressed_node_based_graph.coordinates,
adaptToBisectionEdge(std::move(compressed_node_based_graph.edges)));

View File

@ -158,8 +158,24 @@ RecursiveBisection::FakeFirstPartitionWithSCC(const std::size_t small_component_
}
}
views.push_back(GraphView(bisection_graph, last, bisection_graph.CEnd()));
bool has_small_component = [&]() {
for (std::size_t i = 0; i < scc_algo.GetNumberOfComponents(); ++i)
if (scc_algo.GetComponentSize(i) <= small_component_size)
return true;
return false;
}();
if (!has_small_component)
views.push_back(GraphView(bisection_graph, bisection_graph.CEnd(), bisection_graph.CEnd()));
return views;
}
const std::vector<RecursiveBisectionState::BisectionID> &RecursiveBisection::BisectionIDs() const
{
return internal_state.BisectionIDs();
}
} // namespace partition
} // namespace osrm

View File

@ -80,5 +80,11 @@ RecursiveBisectionState::ApplyBisection(const NodeIterator const_begin,
return const_begin + std::distance(begin, center);
}
const std::vector<RecursiveBisectionState::BisectionID> &
RecursiveBisectionState::BisectionIDs() const
{
return bisection_ids;
}
} // namespace partition
} // namespace osrm

View File

@ -14,8 +14,7 @@ std::size_t TarjanGraphWrapper::GetNumberOfNodes() const { return bisection_grap
util::range<EdgeID> TarjanGraphWrapper::GetAdjacentEdgeRange(const NodeID nid) const
{
const auto &node = bisection_graph.Node(nid);
return util::irange<EdgeID>(node.edges_begin, node.edges_end);
return util::irange<EdgeID>(bisection_graph.BeginEdgeID(nid), bisection_graph.EndEdgeID(nid));
}
NodeID TarjanGraphWrapper::GetTarget(const EdgeID eid) const

View File

@ -6,6 +6,10 @@ file(GLOB ExtractorTestsSources
extractor_tests.cpp
extractor/*.cpp)
file(GLOB PartitionTestsSources
partition_tests.cpp
partition/*.cpp)
file(GLOB LibraryTestsSources
library_tests.cpp
library/*.cpp)
@ -29,6 +33,11 @@ add_executable(extractor-tests
${ExtractorTestsSources}
$<TARGET_OBJECTS:EXTRACTOR> $<TARGET_OBJECTS:UTIL>)
add_executable(partition-tests
EXCLUDE_FROM_ALL
${PartitionTestsSources}
$<TARGET_OBJECTS:PARTITIONER> $<TARGET_OBJECTS:UTIL>)
add_executable(library-tests
EXCLUDE_FROM_ALL
${LibraryTestsSources})
@ -57,6 +66,7 @@ target_include_directories(util-tests PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
target_link_libraries(engine-tests ${ENGINE_LIBRARIES} ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
target_link_libraries(extractor-tests ${EXTRACTOR_LIBRARIES} ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
target_link_libraries(partition-tests ${PARTITIONER_LIBRARIES} ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
target_link_libraries(library-tests osrm ${ENGINE_LIBRARIES} ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
target_link_libraries(server-tests osrm ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
target_link_libraries(util-tests ${UTIL_LIBRARIES} ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
@ -64,4 +74,4 @@ target_link_libraries(util-tests ${UTIL_LIBRARIES} ${Boost_UNIT_TEST_FRAMEWORK_L
add_custom_target(tests
DEPENDS
engine-tests extractor-tests library-tests server-tests util-tests)
engine-tests extractor-tests partition-tests library-tests server-tests util-tests)

View File

@ -0,0 +1,54 @@
#include "partition/reorder_first_last.hpp"
#include <boost/test/test_case_template.hpp>
#include <boost/test/unit_test.hpp>
#include <algorithm>
#include <functional>
#include <iterator>
#include <utility>
#include <vector>
using namespace osrm::partition;
BOOST_AUTO_TEST_SUITE(reorder_first_last)
BOOST_AUTO_TEST_CASE(reordering_one_is_equivalent_to_min_and_max)
{
std::vector<int> range{9, 0, 8, 1, 7, 2, 6, 3, 5, 4};
reorderFirstLast(begin(range), end(range), 1, std::less<>{});
BOOST_CHECK_EQUAL(range.front(), 0);
BOOST_CHECK_EQUAL(range.back(), 9);
reorderFirstLast(begin(range), end(range), 1, std::greater<>{});
BOOST_CHECK_EQUAL(range.front(), 9);
BOOST_CHECK_EQUAL(range.back(), 0);
}
BOOST_AUTO_TEST_CASE(reordering_n_shuffles_n_smallest_to_front_n_largest_to_back)
{
std::vector<int> range{9, 3, 8, 2};
reorderFirstLast(begin(range), end(range), 2, std::less<>{});
// Smallest at front, but: no ordering guarantee in that subrange!
BOOST_CHECK((range[0] == 2 and range[1] == 3) or (range[0] == 3 and range[1] == 2));
// Largest at back, but: no ordering guarantee in that subrange!
BOOST_CHECK((range[2] == 8 and range[3] == 9) or (range[2] == 9 and range[3] == 8));
}
BOOST_AUTO_TEST_CASE(reordering_n_with_iterators)
{
std::vector<int> range{9, 3, 8, 2};
reorderFirstLast(begin(range), end(range), 1, std::less<>{});
BOOST_CHECK_EQUAL(range.front(), 2);
BOOST_CHECK_EQUAL(range.back(), 9);
}
BOOST_AUTO_TEST_SUITE_END()

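The function under test is not part of this diff. Purely as a hedged sketch (not the actual partition/reorder_first_last.hpp implementation), a reorderFirstLast satisfying the tests above can be built from two std::nth_element calls, assuming 2 * n <= std::distance(first, last):

#include <algorithm>
#include <cstddef>
#include <iterator>

template <typename RandomIt, typename Comparator>
void reorderFirstLastSketch(RandomIt first, RandomIt last, std::size_t n, Comparator comp)
{
    const auto count = static_cast<typename std::iterator_traits<RandomIt>::difference_type>(n);
    // move the n smallest elements (w.r.t. comp) into [first, first + n); no order inside the range
    std::nth_element(first, first + count, last, comp);
    // of the remainder, move the n largest into [last - n, last); again no order inside the range
    std::nth_element(first + count, last - count, last, comp);
}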
View File

@ -0,0 +1,7 @@
#define BOOST_TEST_MODULE partition tests
#include <boost/test/unit_test.hpp>
/*
* This file will contain an automatically generated main function.
*/