remove duplicated paths
parent 4445f21e8a
commit 46dd9b9887
@ -1,174 +0,0 @@
/*

Copyright (c) 2014, Project OSRM, Dennis Luxen, others
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

*/

#ifndef BFS_COMPONENTS_HPP_
#define BFS_COMPONENTS_HPP_

#include "../typedefs.h"
#include "../data_structures/restriction_map.hpp"

#include <queue>
#include <unordered_set>

// Explores the components of the given graph while respecting turn restrictions
// and barriers.
template <typename GraphT> class BFSComponentExplorer
{
  public:
    BFSComponentExplorer(const GraphT &dynamic_graph,
                         const RestrictionMap &restrictions,
                         const std::unordered_set<NodeID> &barrier_nodes)
        : m_graph(dynamic_graph), m_restriction_map(restrictions), m_barrier_nodes(barrier_nodes)
    {
        BOOST_ASSERT(m_graph.GetNumberOfNodes() > 0);
    }

    /*!
     * Returns the size of the component that the node belongs to.
     */
    unsigned int GetComponentSize(const NodeID node) const
    {
        BOOST_ASSERT(node < m_component_index_list.size());

        return m_component_index_size[m_component_index_list[node]];
    }

    unsigned int GetNumberOfComponents() { return m_component_index_size.size(); }

    /*!
     * Computes the component sizes.
     */
    void run()
    {
        std::queue<std::pair<NodeID, NodeID>> bfs_queue;
        unsigned current_component = 0;

        BOOST_ASSERT(m_component_index_list.empty());
        BOOST_ASSERT(m_component_index_size.empty());

        unsigned num_nodes = m_graph.GetNumberOfNodes();

        m_component_index_list.resize(num_nodes, std::numeric_limits<unsigned>::max());

        BOOST_ASSERT(num_nodes > 0);

        // put each unexplored node, with a parent pointer, into the queue
        for (NodeID node = 0; node < num_nodes; ++node)
        {
            if (std::numeric_limits<unsigned>::max() == m_component_index_list[node])
            {
                unsigned size = ExploreComponent(bfs_queue, node, current_component);

                // push size into vector
                m_component_index_size.emplace_back(size);
                ++current_component;
            }
        }
    }

  private:
    /*!
     * Explores the current component that starts at node using BFS.
     */
    unsigned ExploreComponent(std::queue<std::pair<NodeID, NodeID>> &bfs_queue,
                              NodeID node,
                              unsigned current_component)
    {
        /*
           Graphical representation of variables:

            u           v           w
            *---------->*---------->*
                              e2
        */

        bfs_queue.emplace(node, node);
        // mark node as read
        m_component_index_list[node] = current_component;

        unsigned current_component_size = 1;

        while (!bfs_queue.empty())
        {
            // fetch element from BFS queue
            std::pair<NodeID, NodeID> current_queue_item = bfs_queue.front();
            bfs_queue.pop();

            const NodeID v = current_queue_item.first;  // current node
            const NodeID u = current_queue_item.second; // parent
            // increment size counter of current component
            ++current_component_size;
            const bool is_barrier_node = (m_barrier_nodes.find(v) != m_barrier_nodes.end());
            if (!is_barrier_node)
            {
                const NodeID to_node_of_only_restriction =
                    m_restriction_map.CheckForEmanatingIsOnlyTurn(u, v);

                for (auto e2 : m_graph.GetAdjacentEdgeRange(v))
                {
                    const NodeID w = m_graph.GetTarget(e2);

                    if (to_node_of_only_restriction != std::numeric_limits<unsigned>::max() &&
                        w != to_node_of_only_restriction)
                    {
                        // at an only_*-restriction, but not at the required turn
                        continue;
                    }

                    if (u != w)
                    {
                        // only add an edge if the turn is not a U-turn, except
                        // when it is at the end of a dead-end street.
                        if (!m_restriction_map.CheckIfTurnIsRestricted(u, v, w))
                        {
                            // only add an edge if the turn is not prohibited
                            if (std::numeric_limits<unsigned>::max() == m_component_index_list[w])
                            {
                                // insert next (node, parent) only if w has
                                // not yet been explored
                                // mark node as read
                                m_component_index_list[w] = current_component;
                                bfs_queue.emplace(w, v);
                            }
                        }
                    }
                }
            }
        }

        return current_component_size;
    }

    std::vector<unsigned> m_component_index_list;
    std::vector<NodeID> m_component_index_size;

    const GraphT &m_graph;
    const RestrictionMap &m_restriction_map;
    const std::unordered_set<NodeID> &m_barrier_nodes;
};

#endif // BFS_COMPONENTS_HPP_
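For orientation, a minimal usage sketch of the BFSComponentExplorer removed above (illustrative only, not part of the commit; GraphT stands for any graph type providing the GetNumberOfNodes/GetAdjacentEdgeRange/GetTarget interface used in the class, and the 1000-node cutoff is an arbitrary example value):

// Illustrative sketch, not part of the commit.
template <typename GraphT>
unsigned CountNodesInSmallComponents(const GraphT &graph,
                                     const RestrictionMap &restrictions,
                                     const std::unordered_set<NodeID> &barrier_nodes)
{
    BFSComponentExplorer<GraphT> explorer(graph, restrictions, barrier_nodes);
    explorer.run(); // assigns every node to a component and records component sizes

    unsigned nodes_in_small_components = 0;
    for (NodeID node = 0; node < graph.GetNumberOfNodes(); ++node)
    {
        if (explorer.GetComponentSize(node) < 1000) // arbitrary example threshold
        {
            ++nodes_in_small_components;
        }
    }
    return nodes_in_small_components;
}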
@ -1,146 +0,0 @@
#ifndef ITERATOR_BASED_CRC32_H
#define ITERATOR_BASED_CRC32_H

#if defined(__x86_64__) && !defined(__MINGW64__)
#include <cpuid.h>
#endif

#include <boost/crc.hpp> // for boost::crc_32_type

#include <iterator>

class IteratorbasedCRC32
{
  public:
    bool using_hardware() const { return use_hardware_implementation; }

    IteratorbasedCRC32() : crc(0) { use_hardware_implementation = detect_hardware_support(); }

    template <class Iterator> unsigned operator()(Iterator iter, const Iterator end)
    {
        unsigned crc = 0;
        while (iter != end)
        {
            using value_type = typename std::iterator_traits<Iterator>::value_type;
            char *data = (char *)(&(*iter));

            if (use_hardware_implementation)
            {
                crc = compute_in_hardware(data, sizeof(value_type));
            }
            else
            {
                crc = compute_in_software(data, sizeof(value_type));
            }
            ++iter;
        }
        return crc;
    }

  private:
    bool detect_hardware_support() const
    {
        static const int sse42_bit = 0x00100000;
        const unsigned ecx = cpuid();
        const bool sse42_found = (ecx & sse42_bit) != 0;
        return sse42_found;
    }

    unsigned compute_in_software(char *str, unsigned len)
    {
        crc_processor.process_bytes(str, len);
        return crc_processor.checksum();
    }

    // adapted from http://byteworm.com/2010/10/13/crc32/
    unsigned compute_in_hardware(char *str, unsigned len)
    {
#if defined(__x86_64__)
        unsigned q = len / sizeof(unsigned);
        unsigned r = len % sizeof(unsigned);
        unsigned *p = (unsigned *)str;

        // crc=0;
        while (q--)
        {
            __asm__ __volatile__(".byte 0xf2, 0xf, 0x38, 0xf1, 0xf1;"
                                 : "=S"(crc)
                                 : "0"(crc), "c"(*p));
            ++p;
        }

        str = (char *)p;
        while (r--)
        {
            __asm__ __volatile__(".byte 0xf2, 0xf, 0x38, 0xf1, 0xf1;"
                                 : "=S"(crc)
                                 : "0"(crc), "c"(*str));
            ++str;
        }
#endif
        return crc;
    }

    inline unsigned cpuid() const
    {
        unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
        // on x86-64 this calls the hardware cpuid instruction, otherwise a dummy implementation
        __get_cpuid(1, &eax, &ebx, &ecx, &edx);
        return ecx;
    }

#if defined(__MINGW64__) || defined(_MSC_VER)
    inline void
    __get_cpuid(int param, unsigned *eax, unsigned *ebx, unsigned *ecx, unsigned *edx) const
    {
        *ecx = 0;
    }
#endif

    boost::crc_optimal<32, 0x1EDC6F41, 0x0, 0x0, true, true> crc_processor;
    unsigned crc;
    bool use_hardware_implementation;
};

struct RangebasedCRC32
{
    template <typename Iteratable> unsigned operator()(const Iteratable &iterable)
    {
        return crc32(std::begin(iterable), std::end(iterable));
    }

    bool using_hardware() const { return crc32.using_hardware(); }

  private:
    IteratorbasedCRC32 crc32;
};

#endif /* ITERATOR_BASED_CRC32_H */
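A minimal usage sketch for the range-based wrapper (illustrative only, not part of the commit):

// Illustrative sketch, not part of the commit.
#include <vector>

unsigned ChecksumOfVector(const std::vector<unsigned> &data)
{
    RangebasedCRC32 crc32;
    // Uses the SSE4.2 CRC32 instruction when available, Boost's software CRC otherwise.
    return crc32(data);
}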
@ -1,166 +0,0 @@
#include "douglas_peucker.hpp"

#include "../data_structures/segment_information.hpp"
#include "../Util/integer_range.hpp"

#include <osrm/Coordinate.h>

#include <boost/assert.hpp>

#include <cmath>

#include <algorithm>

namespace
{
struct CoordinatePairCalculator
{
    CoordinatePairCalculator() = delete;
    CoordinatePairCalculator(const FixedPointCoordinate &coordinate_a,
                             const FixedPointCoordinate &coordinate_b)
    {
        // initialize distance calculator with two fixed coordinates a, b
        const float RAD = 0.017453292519943295769236907684886f;
        first_lat = (coordinate_a.lat / COORDINATE_PRECISION) * RAD;
        first_lon = (coordinate_a.lon / COORDINATE_PRECISION) * RAD;
        second_lat = (coordinate_b.lat / COORDINATE_PRECISION) * RAD;
        second_lon = (coordinate_b.lon / COORDINATE_PRECISION) * RAD;
    }

    int operator()(FixedPointCoordinate &other) const
    {
        // set third coordinate c
        const float RAD = 0.017453292519943295769236907684886f;
        const float earth_radius = 6372797.560856f;
        const float float_lat1 = (other.lat / COORDINATE_PRECISION) * RAD;
        const float float_lon1 = (other.lon / COORDINATE_PRECISION) * RAD;

        // compute distance (a,c)
        const float x_value_1 = (first_lon - float_lon1) * cos((float_lat1 + first_lat) / 2.f);
        const float y_value_1 = first_lat - float_lat1;
        const float dist1 = sqrt(std::pow(x_value_1, 2) + std::pow(y_value_1, 2)) * earth_radius;

        // compute distance (b,c)
        const float x_value_2 = (second_lon - float_lon1) * cos((float_lat1 + second_lat) / 2.f);
        const float y_value_2 = second_lat - float_lat1;
        const float dist2 = sqrt(std::pow(x_value_2, 2) + std::pow(y_value_2, 2)) * earth_radius;

        // return the minimum
        return static_cast<int>(std::min(dist1, dist2));
    }

    float first_lat;
    float first_lon;
    float second_lat;
    float second_lon;
};
}

void DouglasPeucker::Run(std::vector<SegmentInformation> &input_geometry, const unsigned zoom_level)
{
    Run(std::begin(input_geometry), std::end(input_geometry), zoom_level);
}

void DouglasPeucker::Run(RandomAccessIt begin, RandomAccessIt end, const unsigned zoom_level)
{
    unsigned size = std::distance(begin, end);
    if (size < 2)
    {
        return;
    }

    begin->necessary = true;
    std::prev(end)->necessary = true;

    {
        BOOST_ASSERT_MSG(zoom_level < DOUGLAS_PEUCKER_THRESHOLDS.size(), "unsupported zoom level");
        RandomAccessIt left_border = begin;
        RandomAccessIt right_border = std::next(begin);
        // Sweep over array and identify those ranges that need to be checked
        do
        {
            // traverse list until new border element found
            if (right_border->necessary)
            {
                // sanity checks
                BOOST_ASSERT(left_border->necessary);
                BOOST_ASSERT(right_border->necessary);
                recursion_stack.emplace(left_border, right_border);
                left_border = right_border;
            }
            ++right_border;
        } while (right_border != end);
    }

    // mark locations as 'necessary' by divide-and-conquer
    while (!recursion_stack.empty())
    {
        // pop next element
        const GeometryRange pair = recursion_stack.top();
        recursion_stack.pop();
        // sanity checks
        BOOST_ASSERT_MSG(pair.first->necessary, "left border must be necessary");
        BOOST_ASSERT_MSG(pair.second->necessary, "right border must be necessary");
        BOOST_ASSERT_MSG(std::distance(pair.second, end) > 0, "right border outside of geometry");
        BOOST_ASSERT_MSG(std::distance(pair.first, pair.second) >= 0,
                         "left border on the wrong side");

        int max_int_distance = 0;
        auto farthest_entry_it = pair.second;
        const CoordinatePairCalculator dist_calc(pair.first->location, pair.second->location);

        // sweep over range to find the maximum
        for (auto it = std::next(pair.first); it != pair.second; ++it)
        {
            const int distance = dist_calc(it->location);
            // found new feasible maximum?
            if (distance > max_int_distance && distance > DOUGLAS_PEUCKER_THRESHOLDS[zoom_level])
            {
                farthest_entry_it = it;
                max_int_distance = distance;
            }
        }

        // check if maximum violates a zoom level dependent threshold
        if (max_int_distance > DOUGLAS_PEUCKER_THRESHOLDS[zoom_level])
        {
            // mark idx as necessary
            farthest_entry_it->necessary = true;
            if (1 < std::distance(pair.first, farthest_entry_it))
            {
                recursion_stack.emplace(pair.first, farthest_entry_it);
            }
            if (1 < std::distance(farthest_entry_it, pair.second))
            {
                recursion_stack.emplace(farthest_entry_it, pair.second);
            }
        }
    }
}
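For reference (not part of the commit): the per-point measure computed by CoordinatePairCalculator above is the equirectangular (flat-earth) approximation of the distance from the candidate point c to each of the two range endpoints,

    d(p, c) ≈ R * sqrt( ((lon_p − lon_c) * cos((lat_p + lat_c) / 2))^2 + (lat_p − lat_c)^2 )

with R = 6372797.56 m and all angles in radians, and the smaller of the two endpoint distances is returned. This is not the exact perpendicular distance to the chord that the textbook Douglas-Peucker formulation uses; it is cheaper to evaluate and serves the same purpose here as a deviation measure compared against the zoom-dependent thresholds.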
@ -1,80 +0,0 @@
#ifndef DOUGLAS_PEUCKER_HPP_
#define DOUGLAS_PEUCKER_HPP_

#include <stack>
#include <vector>
#include <array>
#include <utility>

/* This class computes a bit vector indicating which of the input points are
 * kept in the generalization, according to the (Ramer-)Douglas-Peucker
 * algorithm.
 *
 * Input is a vector of pairs. Each pair consists of the point information and
 * a bit indicating whether the point is present in the generalization.
 * Note: points may also be pre-selected. */

struct SegmentInformation;

static const std::array<int, 19> DOUGLAS_PEUCKER_THRESHOLDS{{
    512440, // z0
    256720, // z1
    122560, // z2
    56780,  // z3
    28800,  // z4
    14400,  // z5
    7200,   // z6
    3200,   // z7
    2400,   // z8
    1000,   // z9
    600,    // z10
    120,    // z11
    60,     // z12
    45,     // z13
    36,     // z14
    20,     // z15
    8,      // z16
    6,      // z17
    4       // z18
}};

class DouglasPeucker
{
  public:
    using RandomAccessIt = std::vector<SegmentInformation>::iterator;

    using GeometryRange = std::pair<RandomAccessIt, RandomAccessIt>;
    // Stack to simulate the recursion
    std::stack<GeometryRange> recursion_stack;

  public:
    void Run(RandomAccessIt begin, RandomAccessIt end, const unsigned zoom_level);
    void Run(std::vector<SegmentInformation> &input_geometry, const unsigned zoom_level);
};

#endif /* DOUGLAS_PEUCKER_HPP_ */
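A minimal usage sketch (illustrative only, not part of the commit; SegmentInformation is the OSRM struct referenced above, with the 'location' and 'necessary' members used by the implementation):

// Illustrative sketch, not part of the commit.
#include <algorithm>
#include <vector>

unsigned GeneralizeForZoom(std::vector<SegmentInformation> &geometry, const unsigned zoom_level)
{
    DouglasPeucker generalizer;
    generalizer.Run(geometry, zoom_level); // marks the surviving points as 'necessary'

    return static_cast<unsigned>(std::count_if(geometry.begin(),
                                               geometry.end(),
                                               [](const SegmentInformation &segment)
                                               { return segment.necessary; }));
}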
@ -1,94 +0,0 @@
#ifndef OBJECT_ENCODER_HPP
#define OBJECT_ENCODER_HPP

#include "../Util/StringUtil.h"

#include <boost/assert.hpp>
#include <boost/archive/iterators/base64_from_binary.hpp>
#include <boost/archive/iterators/binary_from_base64.hpp>
#include <boost/archive/iterators/transform_width.hpp>

#include <algorithm>
#include <string>
#include <vector>

struct ObjectEncoder
{
    using base64_t = boost::archive::iterators::base64_from_binary<
        boost::archive::iterators::transform_width<const char *, 6, 8>>;

    using binary_t = boost::archive::iterators::transform_width<
        boost::archive::iterators::binary_from_base64<std::string::const_iterator>,
        8,
        6>;

    template <class ObjectT>
    static void EncodeToBase64(const ObjectT &object, std::string &encoded)
    {
        const char *char_ptr_to_object = (const char *)&object;
        std::vector<unsigned char> data(sizeof(object));
        std::copy(char_ptr_to_object, char_ptr_to_object + sizeof(ObjectT), data.begin());

        unsigned char number_of_padded_chars = 0; // is in {0,1,2};
        while (data.size() % 3 != 0)
        {
            ++number_of_padded_chars;
            data.push_back(0x00);
        }

        BOOST_ASSERT_MSG(0 == data.size() % 3, "base64 input data size is not a multiple of 3!");
        encoded.resize(sizeof(ObjectT));
        encoded.assign(base64_t(&data[0]),
                       base64_t(&data[0] + (data.size() - number_of_padded_chars)));
        replaceAll(encoded, "+", "-");
        replaceAll(encoded, "/", "_");
    }

    template <class ObjectT>
    static void DecodeFromBase64(const std::string &input, ObjectT &object)
    {
        try
        {
            std::string encoded(input);
            // replace "-" with "+" and "_" with "/"
            replaceAll(encoded, "-", "+");
            replaceAll(encoded, "_", "/");

            std::copy(binary_t(encoded.begin()),
                      binary_t(encoded.begin() + encoded.length() - 1),
                      (char *)&object);
        }
        catch (...)
        {
        }
    }
};

#endif /* OBJECT_ENCODER_HPP */
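A round-trip sketch (illustrative only, not part of the commit; the 'Checksum' struct is a made-up example, and the encoder is only meaningful for trivially copyable types because it copies raw object bytes):

// Illustrative sketch, not part of the commit.
#include <string>

struct Checksum // hypothetical example payload
{
    unsigned value;
};

bool RoundTripsCorrectly()
{
    const Checksum original{4711};
    std::string encoded;
    ObjectEncoder::EncodeToBase64(original, encoded); // URL-safe base64: '+' -> '-', '/' -> '_'

    Checksum decoded{0};
    ObjectEncoder::DecodeFromBase64(encoded, decoded);
    return decoded.value == original.value;
}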
@ -1,98 +0,0 @@
#include "polyline_compressor.hpp"
#include "../data_structures/segment_information.hpp"

#include <osrm/Coordinate.h>

std::string PolylineCompressor::encode_vector(std::vector<int> &numbers) const
{
    std::string output;
    const auto end = numbers.size();
    for (std::size_t i = 0; i < end; ++i)
    {
        numbers[i] <<= 1;
        if (numbers[i] < 0)
        {
            numbers[i] = ~(numbers[i]);
        }
    }
    for (const int number : numbers)
    {
        output += encode_number(number);
    }
    return output;
}

std::string PolylineCompressor::encode_number(int number_to_encode) const
{
    std::string output;
    while (number_to_encode >= 0x20)
    {
        const int next_value = (0x20 | (number_to_encode & 0x1f)) + 63;
        output += static_cast<char>(next_value);
        if (92 == next_value)
        {
            output += static_cast<char>(next_value);
        }
        number_to_encode >>= 5;
    }

    number_to_encode += 63;
    output += static_cast<char>(number_to_encode);
    if (92 == number_to_encode)
    {
        output += static_cast<char>(number_to_encode);
    }
    return output;
}

std::string
PolylineCompressor::get_encoded_string(const std::vector<SegmentInformation> &polyline) const
{
    if (polyline.empty())
    {
        return {};
    }

    std::vector<int> delta_numbers;
    delta_numbers.reserve((polyline.size() - 1) * 2);
    FixedPointCoordinate previous_coordinate = {0, 0};
    for (const auto &segment : polyline)
    {
        if (segment.necessary)
        {
            const int lat_diff = segment.location.lat - previous_coordinate.lat;
            const int lon_diff = segment.location.lon - previous_coordinate.lon;
            delta_numbers.emplace_back(lat_diff);
            delta_numbers.emplace_back(lon_diff);
            previous_coordinate = segment.location;
        }
    }
    return encode_vector(delta_numbers);
}
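To make the encoding above concrete, a small worked example (not part of the commit): a coordinate delta of -17 is first zig-zag transformed by encode_vector (-17 << 1 = -34, negative, so ~(-34) = 33) and then split by encode_number into 5-bit chunks from the least significant end; 33 = 0b100001 gives the chunks 00001 and 00001. The lower chunk is OR-ed with 0x20 because another chunk follows, and 63 is added to each, yielding the character codes 96 ('`') and 64 ('@'), so -17 encodes as the two-character string "`@". The special case for character code 92 doubles the backslash, presumably so the encoded polyline can be embedded in JSON output without further escaping.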
@ -1,47 +0,0 @@
#ifndef POLYLINECOMPRESSOR_H_
#define POLYLINECOMPRESSOR_H_

struct SegmentInformation;

#include <string>
#include <vector>

class PolylineCompressor
{
  private:
    std::string encode_vector(std::vector<int> &numbers) const;

    std::string encode_number(const int number_to_encode) const;

  public:
    std::string get_encoded_string(const std::vector<SegmentInformation> &polyline) const;
};

#endif /* POLYLINECOMPRESSOR_H_ */
@ -1,56 +0,0 @@
#include "polyline_formatter.hpp"

#include "polyline_compressor.hpp"
#include "../data_structures/segment_information.hpp"

#include <osrm/Coordinate.h>

JSON::String
PolylineFormatter::printEncodedString(const std::vector<SegmentInformation> &polyline) const
{
    return JSON::String(PolylineCompressor().get_encoded_string(polyline));
}

JSON::Array
PolylineFormatter::printUnencodedString(const std::vector<SegmentInformation> &polyline) const
{
    JSON::Array json_geometry_array;
    for (const auto &segment : polyline)
    {
        if (segment.necessary)
        {
            JSON::Array json_coordinate;
            json_coordinate.values.push_back(segment.location.lat / COORDINATE_PRECISION);
            json_coordinate.values.push_back(segment.location.lon / COORDINATE_PRECISION);
            json_geometry_array.values.push_back(json_coordinate);
        }
    }
    return json_geometry_array;
}
@ -1,45 +0,0 @@
#ifndef POLYLINE_FORMATTER_HPP
#define POLYLINE_FORMATTER_HPP

struct SegmentInformation;

#include "../data_structures/json_container.hpp"

#include <string>
#include <vector>

struct PolylineFormatter
{
    JSON::String printEncodedString(const std::vector<SegmentInformation> &polyline) const;

    JSON::Array printUnencodedString(const std::vector<SegmentInformation> &polyline) const;
};

#endif /* POLYLINE_FORMATTER_HPP */
@ -1,169 +0,0 @@
#ifndef EXTRACT_ROUTE_NAMES_H
#define EXTRACT_ROUTE_NAMES_H

#include <boost/assert.hpp>

#include <algorithm>
#include <string>
#include <vector>

struct RouteNames
{
    std::string shortest_path_name_1;
    std::string shortest_path_name_2;
    std::string alternative_path_name_1;
    std::string alternative_path_name_2;
};

// constructs route names
template <class DataFacadeT, class SegmentT> struct ExtractRouteNames
{
  private:
    SegmentT PickNextLongestSegment(const std::vector<SegmentT> &segment_list,
                                    const unsigned blocked_name_id) const
    {
        SegmentT result_segment;
        result_segment.length = 0;

        for (const SegmentT &segment : segment_list)
        {
            if (segment.name_id != blocked_name_id && segment.length > result_segment.length &&
                segment.name_id != 0)
            {
                result_segment = segment;
            }
        }
        return result_segment;
    }

  public:
    RouteNames operator()(std::vector<SegmentT> &shortest_path_segments,
                          std::vector<SegmentT> &alternative_path_segments,
                          const DataFacadeT *facade) const
    {
        RouteNames route_names;

        SegmentT shortest_segment_1, shortest_segment_2;
        SegmentT alternative_segment_1, alternative_segment_2;

        auto length_comperator = [](const SegmentT &a, const SegmentT &b)
        { return a.length > b.length; };
        auto name_id_comperator = [](const SegmentT &a, const SegmentT &b)
        { return a.name_id < b.name_id; };

        if (shortest_path_segments.empty())
        {
            return route_names;
        }

        // pick the longest segment for the shortest path.
        std::sort(shortest_path_segments.begin(), shortest_path_segments.end(), length_comperator);
        shortest_segment_1 = shortest_path_segments[0];
        if (!alternative_path_segments.empty())
        {
            std::sort(alternative_path_segments.begin(),
                      alternative_path_segments.end(),
                      length_comperator);

            // also pick the longest segment for the alternative path
            alternative_segment_1 = alternative_path_segments[0];
        }

        // compute the set difference (for the shortest path) by name between shortest and
        // alternative
        std::vector<SegmentT> shortest_path_set_difference(shortest_path_segments.size());
        std::sort(shortest_path_segments.begin(), shortest_path_segments.end(), name_id_comperator);
        std::sort(alternative_path_segments.begin(),
                  alternative_path_segments.end(),
                  name_id_comperator);
        std::set_difference(shortest_path_segments.begin(),
                            shortest_path_segments.end(),
                            alternative_path_segments.begin(),
                            alternative_path_segments.end(),
                            shortest_path_set_difference.begin(),
                            name_id_comperator);

        std::sort(shortest_path_set_difference.begin(),
                  shortest_path_set_difference.end(),
                  length_comperator);
        shortest_segment_2 =
            PickNextLongestSegment(shortest_path_set_difference, shortest_segment_1.name_id);

        // compute the set difference (for the alternative path) by name between shortest and
        // alternative
        // vectors are still sorted, no need to sort again
        BOOST_ASSERT(std::is_sorted(shortest_path_segments.begin(),
                                    shortest_path_segments.end(),
                                    name_id_comperator));
        BOOST_ASSERT(std::is_sorted(alternative_path_segments.begin(),
                                    alternative_path_segments.end(),
                                    name_id_comperator));

        std::vector<SegmentT> alternative_path_set_difference(alternative_path_segments.size());
        std::set_difference(alternative_path_segments.begin(),
                            alternative_path_segments.end(),
                            shortest_path_segments.begin(),
                            shortest_path_segments.end(),
                            alternative_path_set_difference.begin(),
                            name_id_comperator);

        std::sort(alternative_path_set_difference.begin(),
                  alternative_path_set_difference.end(),
                  length_comperator);

        if (!alternative_path_segments.empty())
        {
            alternative_segment_2 = PickNextLongestSegment(alternative_path_set_difference,
                                                           alternative_segment_1.name_id);
        }

        // move the segments into the order in which they occur.
        if (shortest_segment_1.position > shortest_segment_2.position)
        {
            std::swap(shortest_segment_1, shortest_segment_2);
        }
        if (alternative_segment_1.position > alternative_segment_2.position)
        {
            std::swap(alternative_segment_1, alternative_segment_2);
        }

        // fetch names for the selected segments
        route_names.shortest_path_name_1 =
            facade->GetEscapedNameForNameID(shortest_segment_1.name_id);
        route_names.shortest_path_name_2 =
            facade->GetEscapedNameForNameID(shortest_segment_2.name_id);

        route_names.alternative_path_name_1 =
            facade->GetEscapedNameForNameID(alternative_segment_1.name_id);
        route_names.alternative_path_name_2 =
            facade->GetEscapedNameForNameID(alternative_segment_2.name_id);

        return route_names;
    }
};

#endif // EXTRACT_ROUTE_NAMES_H
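A sketch of how the functor above would be driven (illustrative only, not part of the commit; MinimalSegment and NameFacade are hypothetical stand-ins exposing just the members and the GetEscapedNameForNameID call used above):

// Illustrative sketch, not part of the commit; MinimalSegment and NameFacade are hypothetical.
#include <string>
#include <vector>

struct MinimalSegment
{
    unsigned name_id = 0;
    int length = 0;
    unsigned position = 0;
};

struct NameFacade
{
    std::string GetEscapedNameForNameID(const unsigned name_id) const
    {
        return "name #" + std::to_string(name_id);
    }
};

RouteNames NamesForRoutes(std::vector<MinimalSegment> shortest, std::vector<MinimalSegment> alternative)
{
    const NameFacade facade;
    ExtractRouteNames<NameFacade, MinimalSegment> extract_names;
    // yields the two most prominent street names of each route
    return extract_names(shortest, alternative, &facade);
}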
@ -1,459 +0,0 @@
#ifndef TINY_COMPONENTS_HPP
|
||||
#define TINY_COMPONENTS_HPP
|
||||
|
||||
#include "../typedefs.h"
|
||||
#include "../data_structures/deallocating_vector.hpp"
|
||||
#include "../data_structures/dynamic_graph.hpp"
|
||||
#include "../data_structures/import_edge.hpp"
|
||||
#include "../data_structures/query_node.hpp"
|
||||
#include "../data_structures/percent.hpp"
|
||||
#include "../data_structures/restriction.hpp"
|
||||
#include "../data_structures/turn_instructions.hpp"
|
||||
|
||||
#include "../Util/integer_range.hpp"
|
||||
#include "../Util/OSRMException.h"
|
||||
#include "../Util/simple_logger.hpp"
|
||||
#include "../Util/StdHashExtensions.h"
|
||||
#include "../Util/TimingUtil.h"
|
||||
|
||||
#include <osrm/Coordinate.h>
|
||||
|
||||
#include <boost/assert.hpp>
|
||||
#include <boost/filesystem.hpp>
|
||||
|
||||
#include <tbb/parallel_sort.h>
|
||||
|
||||
#if defined(__APPLE__) || defined (_WIN32)
|
||||
#include <gdal.h>
|
||||
#include <ogrsf_frmts.h>
|
||||
#else
|
||||
#include <gdal/gdal.h>
|
||||
#include <gdal/ogrsf_frmts.h>
|
||||
#endif
|
||||
|
||||
#include <cstdint>
|
||||
|
||||
#include <memory>
|
||||
#include <stack>
|
||||
#include <unordered_map>
|
||||
#include <unordered_set>
|
||||
#include <vector>
|
||||
|
||||
class TarjanSCC
|
||||
{
|
||||
private:
|
||||
struct TarjanNode
|
||||
{
|
||||
TarjanNode() : index(SPECIAL_NODEID), low_link(SPECIAL_NODEID), on_stack(false) {}
|
||||
unsigned index;
|
||||
unsigned low_link;
|
||||
bool on_stack;
|
||||
};
|
||||
|
||||
struct TarjanEdgeData
|
||||
{
|
||||
TarjanEdgeData() : distance(INVALID_EDGE_WEIGHT), name_id(INVALID_NAMEID) {}
|
||||
TarjanEdgeData(int distance, unsigned name_id) : distance(distance), name_id(name_id) {}
|
||||
int distance;
|
||||
unsigned name_id;
|
||||
};
|
||||
|
||||
struct TarjanStackFrame
|
||||
{
|
||||
explicit TarjanStackFrame(NodeID v, NodeID parent) : v(v), parent(parent) {}
|
||||
NodeID v;
|
||||
NodeID parent;
|
||||
};
|
||||
|
||||
using TarjanDynamicGraph = DynamicGraph<TarjanEdgeData>;
|
||||
using TarjanEdge = TarjanDynamicGraph::InputEdge;
|
||||
using RestrictionSource = std::pair<NodeID, NodeID>;
|
||||
using RestrictionTarget = std::pair<NodeID, bool>;
|
||||
using EmanatingRestrictionsVector = std::vector<RestrictionTarget>;
|
||||
using RestrictionMap = std::unordered_map<RestrictionSource, unsigned>;
|
||||
|
||||
std::vector<QueryNode> m_coordinate_list;
|
||||
std::vector<EmanatingRestrictionsVector> m_restriction_bucket_list;
|
||||
std::shared_ptr<TarjanDynamicGraph> m_node_based_graph;
|
||||
std::unordered_set<NodeID> barrier_node_list;
|
||||
std::unordered_set<NodeID> traffic_light_list;
|
||||
unsigned m_restriction_counter;
|
||||
RestrictionMap m_restriction_map;
|
||||
|
||||
public:
|
||||
TarjanSCC(int number_of_nodes,
|
||||
std::vector<NodeBasedEdge> &input_edges,
|
||||
std::vector<NodeID> &bn,
|
||||
std::vector<NodeID> &tl,
|
||||
std::vector<TurnRestriction> &irs,
|
||||
std::vector<QueryNode> &nI)
|
||||
: m_coordinate_list(nI), m_restriction_counter(irs.size())
|
||||
{
|
||||
TIMER_START(SCC_LOAD);
|
||||
for (const TurnRestriction &restriction : irs)
|
||||
{
|
||||
std::pair<NodeID, NodeID> restriction_source = {restriction.from.node,
|
||||
restriction.via.node};
|
||||
unsigned index = 0;
|
||||
const auto restriction_iterator = m_restriction_map.find(restriction_source);
|
||||
if (restriction_iterator == m_restriction_map.end())
|
||||
{
|
||||
index = m_restriction_bucket_list.size();
|
||||
m_restriction_bucket_list.resize(index + 1);
|
||||
m_restriction_map.emplace(restriction_source, index);
|
||||
}
|
||||
else
|
||||
{
|
||||
index = restriction_iterator->second;
|
||||
// Map already contains an is_only_*-restriction
|
||||
if (m_restriction_bucket_list.at(index).begin()->second)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
else if (restriction.flags.is_only)
|
||||
{
|
||||
// We are going to insert an is_only_*-restriction. There can be only one.
|
||||
m_restriction_bucket_list.at(index).clear();
|
||||
}
|
||||
}
|
||||
|
||||
m_restriction_bucket_list.at(index)
|
||||
.emplace_back(restriction.to.node, restriction.flags.is_only);
|
||||
}
|
||||
|
||||
barrier_node_list.insert(bn.begin(), bn.end());
|
||||
traffic_light_list.insert(tl.begin(), tl.end());
|
||||
|
||||
DeallocatingVector<TarjanEdge> edge_list;
|
||||
for (const NodeBasedEdge &input_edge : input_edges)
|
||||
{
|
||||
if (input_edge.source == input_edge.target)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
if (input_edge.forward)
|
||||
{
|
||||
edge_list.emplace_back(input_edge.source,
|
||||
input_edge.target,
|
||||
(std::max)((int)input_edge.weight, 1),
|
||||
input_edge.name_id);
|
||||
}
|
||||
if (input_edge.backward)
|
||||
{
|
||||
edge_list.emplace_back(input_edge.target,
|
||||
input_edge.source,
|
||||
(std::max)((int)input_edge.weight, 1),
|
||||
input_edge.name_id);
|
||||
}
|
||||
}
|
||||
input_edges.clear();
|
||||
input_edges.shrink_to_fit();
|
||||
BOOST_ASSERT_MSG(0 == input_edges.size() && 0 == input_edges.capacity(),
|
||||
"input edge vector not properly deallocated");
|
||||
|
||||
tbb::parallel_sort(edge_list.begin(), edge_list.end());
|
||||
m_node_based_graph = std::make_shared<TarjanDynamicGraph>(number_of_nodes, edge_list);
|
||||
TIMER_STOP(SCC_LOAD);
|
||||
SimpleLogger().Write() << "Loading data into SCC took " << TIMER_MSEC(SCC_LOAD)/1000. << "s";
|
||||
}
|
||||
|
||||
~TarjanSCC() { m_node_based_graph.reset(); }
|
||||
|
||||
void Run()
|
||||
{
|
||||
TIMER_START(SCC_RUN_SETUP);
|
||||
// remove files from previous run if exist
|
||||
DeleteFileIfExists("component.dbf");
|
||||
DeleteFileIfExists("component.shx");
|
||||
DeleteFileIfExists("component.shp");
|
||||
|
||||
Percent p(m_node_based_graph->GetNumberOfNodes());
|
||||
|
||||
OGRRegisterAll();
|
||||
|
||||
const char *pszDriverName = "ESRI Shapefile";
|
||||
OGRSFDriver *poDriver =
|
||||
OGRSFDriverRegistrar::GetRegistrar()->GetDriverByName(pszDriverName);
|
||||
if (nullptr == poDriver)
|
||||
{
|
||||
throw OSRMException("ESRI Shapefile driver not available");
|
||||
}
|
||||
OGRDataSource *poDS = poDriver->CreateDataSource("component.shp", nullptr);
|
||||
|
||||
if (nullptr == poDS)
|
||||
{
|
||||
throw OSRMException("Creation of output file failed");
|
||||
}
|
||||
|
||||
OGRSpatialReference *poSRS = new OGRSpatialReference();
|
||||
poSRS->importFromEPSG(4326);
|
||||
|
||||
OGRLayer *poLayer = poDS->CreateLayer("component", poSRS, wkbLineString, nullptr);
|
||||
|
||||
if (nullptr == poLayer)
|
||||
{
|
||||
throw OSRMException("Layer creation failed.");
|
||||
}
|
||||
TIMER_STOP(SCC_RUN_SETUP);
|
||||
SimpleLogger().Write() << "shapefile setup took " << TIMER_MSEC(SCC_RUN_SETUP)/1000. << "s";
|
||||
|
||||
TIMER_START(SCC_RUN);
|
||||
// The following is a hack to distinguish between stuff that happens
|
||||
// before the recursive call and stuff that happens after
|
||||
std::stack<TarjanStackFrame> recursion_stack;
|
||||
// true = stuff before, false = stuff after call
|
||||
std::stack<NodeID> tarjan_stack;
|
||||
std::vector<unsigned> components_index(m_node_based_graph->GetNumberOfNodes(),
|
||||
SPECIAL_NODEID);
|
||||
std::vector<NodeID> component_size_vector;
|
||||
std::vector<TarjanNode> tarjan_node_list(m_node_based_graph->GetNumberOfNodes());
|
||||
unsigned component_index = 0, size_of_current_component = 0;
|
||||
int index = 0;
|
||||
const NodeID last_node = m_node_based_graph->GetNumberOfNodes();
|
||||
std::vector<bool> processing_node_before_recursion(m_node_based_graph->GetNumberOfNodes(), true);
|
||||
for(const NodeID node : osrm::irange(0u, last_node))
|
||||
{
|
||||
if (SPECIAL_NODEID == components_index[node])
|
||||
{
|
||||
recursion_stack.emplace(TarjanStackFrame(node, node));
|
||||
}
|
||||
|
||||
while (!recursion_stack.empty())
|
||||
{
|
||||
TarjanStackFrame currentFrame = recursion_stack.top();
|
||||
const NodeID v = currentFrame.v;
|
||||
recursion_stack.pop();
|
||||
const bool before_recursion = processing_node_before_recursion[v];
|
||||
|
||||
if (before_recursion && tarjan_node_list[v].index != UINT_MAX)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
if (before_recursion)
|
||||
{
|
||||
// Mark frame to handle tail of recursion
|
||||
recursion_stack.emplace(currentFrame);
|
||||
processing_node_before_recursion[v] = false;
|
||||
|
||||
// Mark essential information for SCC
|
||||
tarjan_node_list[v].index = index;
|
||||
tarjan_node_list[v].low_link = index;
|
||||
tarjan_stack.push(v);
|
||||
tarjan_node_list[v].on_stack = true;
|
||||
++index;
|
||||
|
||||
// Traverse outgoing edges
|
||||
for (const auto current_edge : m_node_based_graph->GetAdjacentEdgeRange(v))
|
||||
{
|
||||
const TarjanDynamicGraph::NodeIterator vprime =
|
||||
m_node_based_graph->GetTarget(current_edge);
|
||||
if (SPECIAL_NODEID == tarjan_node_list[vprime].index)
|
||||
{
|
||||
recursion_stack.emplace(TarjanStackFrame(vprime, v));
|
||||
}
|
||||
else
|
||||
{
|
||||
if (tarjan_node_list[vprime].on_stack &&
|
||||
tarjan_node_list[vprime].index < tarjan_node_list[v].low_link)
|
||||
{
|
||||
tarjan_node_list[v].low_link = tarjan_node_list[vprime].index;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
processing_node_before_recursion[v] = true;
|
||||
tarjan_node_list[currentFrame.parent].low_link =
|
||||
std::min(tarjan_node_list[currentFrame.parent].low_link,
|
||||
tarjan_node_list[v].low_link);
|
||||
// after recursion, lets do cycle checking
|
||||
// Check if we found a cycle. This is the bottom part of the recursion
|
||||
if (tarjan_node_list[v].low_link == tarjan_node_list[v].index)
|
||||
{
|
||||
NodeID vprime;
|
||||
do
|
||||
{
|
||||
vprime = tarjan_stack.top();
|
||||
tarjan_stack.pop();
|
||||
tarjan_node_list[vprime].on_stack = false;
|
||||
components_index[vprime] = component_index;
|
||||
++size_of_current_component;
|
||||
} while (v != vprime);
|
||||
|
||||
component_size_vector.emplace_back(size_of_current_component);
|
||||
|
||||
if (size_of_current_component > 1000)
|
||||
{
|
||||
SimpleLogger().Write() << "large component [" << component_index
|
||||
<< "]=" << size_of_current_component;
|
||||
}
|
||||
|
||||
++component_index;
|
||||
size_of_current_component = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TIMER_STOP(SCC_RUN);
|
||||
SimpleLogger().Write() << "SCC run took: " << TIMER_MSEC(SCC_RUN)/1000. << "s";
|
||||
SimpleLogger().Write() << "identified: " << component_size_vector.size()
|
||||
<< " many components, marking small components";
|
||||
|
||||
TIMER_START(SCC_OUTPUT);
|
||||
|
||||
const unsigned size_one_counter = std::count_if(component_size_vector.begin(),
|
||||
component_size_vector.end(),
|
||||
[](unsigned value)
|
||||
{
|
||||
return 1 == value;
|
||||
});
|
||||
|
||||
SimpleLogger().Write() << "identified " << size_one_counter << " SCCs of size 1";
|
||||
|
||||
uint64_t total_network_distance = 0;
|
||||
p.reinit(m_node_based_graph->GetNumberOfNodes());
|
||||
// const NodeID last_u_node = m_node_based_graph->GetNumberOfNodes();
|
||||
for (const NodeID source : osrm::irange(0u, last_node))
|
||||
{
|
||||
p.printIncrement();
|
||||
for (const auto current_edge : m_node_based_graph->GetAdjacentEdgeRange(source))
|
||||
{
|
||||
const TarjanDynamicGraph::NodeIterator target =
|
||||
m_node_based_graph->GetTarget(current_edge);
|
||||
|
||||
if (source < target ||
|
||||
m_node_based_graph->EndEdges(target) ==
|
||||
m_node_based_graph->FindEdge(target, source))
|
||||
{
|
||||
total_network_distance +=
|
||||
100 * FixedPointCoordinate::ApproximateEuclideanDistance(
|
||||
m_coordinate_list[source].lat,
|
||||
m_coordinate_list[source].lon,
|
||||
m_coordinate_list[target].lat,
|
||||
m_coordinate_list[target].lon);
|
||||
|
||||
BOOST_ASSERT(current_edge != SPECIAL_EDGEID);
|
||||
BOOST_ASSERT(source != SPECIAL_NODEID);
|
||||
BOOST_ASSERT(target != SPECIAL_NODEID);
|
||||
|
||||
const unsigned size_of_containing_component =
|
||||
std::min(component_size_vector[components_index[source]],
|
||||
component_size_vector[components_index[target]]);
|
||||
|
||||
// edges that end on bollard nodes may actually be in two distinct components
|
||||
if (size_of_containing_component < 10)
|
||||
{
|
||||
OGRLineString lineString;
|
||||
lineString.addPoint(m_coordinate_list[source].lon / COORDINATE_PRECISION,
|
||||
m_coordinate_list[source].lat / COORDINATE_PRECISION);
|
||||
lineString.addPoint(m_coordinate_list[target].lon / COORDINATE_PRECISION,
|
||||
m_coordinate_list[target].lat / COORDINATE_PRECISION);
|
||||
|
||||
OGRFeature *poFeature = OGRFeature::CreateFeature(poLayer->GetLayerDefn());
|
||||
|
||||
poFeature->SetGeometry(&lineString);
|
||||
if (OGRERR_NONE != poLayer->CreateFeature(poFeature))
|
||||
{
|
||||
throw OSRMException("Failed to create feature in shapefile.");
|
||||
}
|
||||
OGRFeature::DestroyFeature(poFeature);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
OGRDataSource::DestroyDataSource(poDS);
|
||||
component_size_vector.clear();
|
||||
component_size_vector.shrink_to_fit();
|
||||
BOOST_ASSERT_MSG(0 == component_size_vector.size() && 0 == component_size_vector.capacity(),
|
||||
"component_size_vector not properly deallocated");
|
||||
|
||||
components_index.clear();
|
||||
components_index.shrink_to_fit();
|
||||
BOOST_ASSERT_MSG(0 == components_index.size() && 0 == components_index.capacity(),
|
||||
"components_index not properly deallocated");
|
||||
TIMER_STOP(SCC_OUTPUT);
|
||||
SimpleLogger().Write() << "generating output took: " << TIMER_MSEC(SCC_OUTPUT)/1000. << "s";
|
||||
|
||||
SimpleLogger().Write() << "total network distance: "
|
||||
<< (uint64_t)total_network_distance / 100 / 1000. << " km";
|
||||
}
|
||||
|
||||
private:
|
||||
unsigned CheckForEmanatingIsOnlyTurn(const NodeID u, const NodeID v) const
|
||||
{
|
||||
std::pair<NodeID, NodeID> restriction_source = {u, v};
|
||||
const auto restriction_iterator = m_restriction_map.find(restriction_source);
|
||||
if (restriction_iterator != m_restriction_map.end())
|
||||
{
|
||||
const unsigned index = restriction_iterator->second;
|
||||
for (const RestrictionTarget &restriction_target : m_restriction_bucket_list.at(index))
|
||||
{
|
||||
if (restriction_target.second)
|
||||
{
|
||||
return restriction_target.first;
|
||||
}
|
||||
}
|
||||
}
|
||||
return SPECIAL_NODEID;
|
||||
}
|
||||
|
||||
bool CheckIfTurnIsRestricted(const NodeID u, const NodeID v, const NodeID w) const
|
||||
{
|
||||
// only add an edge if the turn is not a U-turn, unless it is at the end of a dead-end street.
|
||||
std::pair<NodeID, NodeID> restriction_source = {u, v};
|
||||
const auto restriction_iterator = m_restriction_map.find(restriction_source);
|
||||
if (restriction_iterator != m_restriction_map.end())
|
||||
{
|
||||
const unsigned index = restriction_iterator->second;
|
||||
for (const RestrictionTarget &restriction_target : m_restriction_bucket_list.at(index))
|
||||
{
|
||||
if (w == restriction_target.first)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void DeleteFileIfExists(const std::string &file_name) const
|
||||
{
|
||||
if (boost::filesystem::exists(file_name))
|
||||
{
|
||||
boost::filesystem::remove(file_name);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
#endif /* TINY_COMPONENTS_HPP */
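For reference while reviewing the removal: the shapefile export above keeps only edges whose smaller endpoint component falls below a size threshold. Below is a minimal stand-alone sketch of that test, assuming the components_index / component_size_vector layout shown in the hunk; the function name IsTinyComponentEdge and the threshold parameter are illustrative, not part of the original code.

#include <algorithm>
#include <cstdint>
#include <vector>

using NodeID = std::uint32_t; // stand-in for the project typedef

// True when edge (source, target) would be written to the debug shapefile,
// i.e. when the smaller of the two endpoint components holds fewer than
// `threshold` nodes; mirrors the "size_of_containing_component < 10" check.
bool IsTinyComponentEdge(const std::vector<unsigned> &components_index,
                         const std::vector<unsigned> &component_size_vector,
                         const NodeID source,
                         const NodeID target,
                         const unsigned threshold = 10)
{
    // edges that end on barrier (bollard) nodes may lie in two distinct
    // components, hence the minimum over both endpoint component sizes
    const unsigned size_of_containing_component =
        std::min(component_size_vector[components_index[source]],
                 component_size_vector[components_index[target]]);
    return size_of_containing_component < threshold;
}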
|
@ -1,189 +0,0 @@
|
||||
/*
|
||||
|
||||
Copyright (c) 2014, Project OSRM, Dennis Luxen, others
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice, this list
|
||||
of conditions and the following disclaimer.
|
||||
Redistributions in binary form must reproduce the above copyright notice, this
|
||||
list of conditions and the following disclaimer in the documentation and/or
|
||||
other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
*/
|
||||
|
||||
#include "../data_structures/original_edge_data.hpp"
|
||||
#include "../data_structures/query_node.hpp"
|
||||
#include "../data_structures/shared_memory_vector_wrapper.hpp"
|
||||
#include "../data_structures/static_rtree.hpp"
|
||||
#include "../data_structures/edge_based_node.hpp"
|
||||
#include "../Util/BoostFileSystemFix.h"
|
||||
|
||||
#include <osrm/Coordinate.h>
|
||||
|
||||
#include <random>
|
||||
|
||||
// Chosen by a fair W20 dice roll (this value is completely arbitrary)
|
||||
constexpr unsigned RANDOM_SEED = 13;
|
||||
constexpr int32_t WORLD_MIN_LAT = -90 * COORDINATE_PRECISION;
|
||||
constexpr int32_t WORLD_MAX_LAT = 90 * COORDINATE_PRECISION;
|
||||
constexpr int32_t WORLD_MIN_LON = -180 * COORDINATE_PRECISION;
|
||||
constexpr int32_t WORLD_MAX_LON = 180 * COORDINATE_PRECISION;
|
||||
|
||||
using RTreeLeaf = EdgeBasedNode;
|
||||
using FixedPointCoordinateListPtr = std::shared_ptr<std::vector<FixedPointCoordinate>>;
|
||||
using BenchStaticRTree = StaticRTree<RTreeLeaf, ShM<FixedPointCoordinate, false>::vector, false>;
|
||||
|
||||
FixedPointCoordinateListPtr LoadCoordinates(const boost::filesystem::path &nodes_file)
|
||||
{
|
||||
boost::filesystem::ifstream nodes_input_stream(nodes_file, std::ios::binary);
|
||||
|
||||
QueryNode current_node;
|
||||
unsigned number_of_coordinates = 0;
|
||||
nodes_input_stream.read((char *)&number_of_coordinates, sizeof(unsigned));
|
||||
auto coords = std::make_shared<std::vector<FixedPointCoordinate>>(number_of_coordinates);
|
||||
for (unsigned i = 0; i < number_of_coordinates; ++i)
|
||||
{
|
||||
nodes_input_stream.read((char *)¤t_node, sizeof(QueryNode));
|
||||
coords->at(i) = FixedPointCoordinate(current_node.lat, current_node.lon);
|
||||
BOOST_ASSERT((std::abs(coords->at(i).lat) >> 30) == 0);
|
||||
BOOST_ASSERT((std::abs(coords->at(i).lon) >> 30) == 0);
|
||||
}
|
||||
nodes_input_stream.close();
|
||||
return coords;
|
||||
}
|
||||
|
||||
void Benchmark(BenchStaticRTree &rtree, unsigned num_queries)
|
||||
{
|
||||
std::mt19937 mt_rand(RANDOM_SEED);
|
||||
std::uniform_int_distribution<> lat_udist(WORLD_MIN_LAT, WORLD_MAX_LAT);
|
||||
std::uniform_int_distribution<> lon_udist(WORLD_MIN_LON, WORLD_MAX_LON);
|
||||
std::vector<FixedPointCoordinate> queries;
|
||||
for (unsigned i = 0; i < num_queries; i++)
|
||||
{
|
||||
queries.emplace_back(FixedPointCoordinate(lat_udist(mt_rand), lon_udist(mt_rand)));
|
||||
}
|
||||
|
||||
{
|
||||
const unsigned num_results = 5;
|
||||
std::cout << "#### IncrementalFindPhantomNodeForCoordinate : " << num_results
|
||||
<< " phantom nodes"
|
||||
<< "\n";
|
||||
|
||||
TIMER_START(query_phantom);
|
||||
std::vector<PhantomNode> resulting_phantom_node_vector;
|
||||
for (const auto &q : queries)
|
||||
{
|
||||
resulting_phantom_node_vector.clear();
|
||||
rtree.IncrementalFindPhantomNodeForCoordinate(
|
||||
q, resulting_phantom_node_vector, 3, num_results);
|
||||
resulting_phantom_node_vector.clear();
|
||||
rtree.IncrementalFindPhantomNodeForCoordinate(
|
||||
q, resulting_phantom_node_vector, 17, num_results);
|
||||
}
|
||||
TIMER_STOP(query_phantom);
|
||||
|
||||
std::cout << "Took " << TIMER_MSEC(query_phantom) << " msec for " << num_queries
|
||||
<< " queries."
|
||||
<< "\n";
|
||||
std::cout << TIMER_MSEC(query_phantom) / ((double)num_queries) << " msec/query."
|
||||
<< "\n";
|
||||
|
||||
std::cout << "#### LocateClosestEndPointForCoordinate"
|
||||
<< "\n";
|
||||
}
|
||||
|
||||
TIMER_START(query_endpoint);
|
||||
FixedPointCoordinate result;
|
||||
for (const auto &q : queries)
|
||||
{
|
||||
rtree.LocateClosestEndPointForCoordinate(q, result, 3);
|
||||
}
|
||||
TIMER_STOP(query_endpoint);
|
||||
|
||||
std::cout << "Took " << TIMER_MSEC(query_endpoint) << " msec for " << num_queries << " queries."
|
||||
<< "\n";
|
||||
std::cout << TIMER_MSEC(query_endpoint) / ((double)num_queries) << " msec/query."
|
||||
<< "\n";
|
||||
|
||||
std::cout << "#### FindPhantomNodeForCoordinate"
|
||||
<< "\n";
|
||||
|
||||
TIMER_START(query_phantomnode);
|
||||
for (const auto &q : queries)
|
||||
{
|
||||
PhantomNode phantom;
|
||||
rtree.FindPhantomNodeForCoordinate(q, phantom, 3);
|
||||
}
|
||||
TIMER_STOP(query_phantomnode);
|
||||
|
||||
std::cout << "Took " << TIMER_MSEC(query_phantomnode) << " msec for " << num_queries
|
||||
<< " queries."
|
||||
<< "\n";
|
||||
std::cout << TIMER_MSEC(query_phantomnode) / ((double)num_queries) << " msec/query."
|
||||
<< "\n";
|
||||
|
||||
{
|
||||
const unsigned num_results = 1;
|
||||
std::cout << "#### IncrementalFindPhantomNodeForCoordinate : " << num_results
|
||||
<< " phantom nodes"
|
||||
<< "\n";
|
||||
|
||||
TIMER_START(query_phantom);
|
||||
std::vector<PhantomNode> resulting_phantom_node_vector;
|
||||
for (const auto &q : queries)
|
||||
{
|
||||
resulting_phantom_node_vector.clear();
|
||||
rtree.IncrementalFindPhantomNodeForCoordinate(
|
||||
q, resulting_phantom_node_vector, 3, num_results);
|
||||
resulting_phantom_node_vector.clear();
|
||||
rtree.IncrementalFindPhantomNodeForCoordinate(
|
||||
q, resulting_phantom_node_vector, 17, num_results);
|
||||
}
|
||||
TIMER_STOP(query_phantom);
|
||||
|
||||
std::cout << "Took " << TIMER_MSEC(query_phantom) << " msec for " << num_queries
|
||||
<< " queries."
|
||||
<< "\n";
|
||||
std::cout << TIMER_MSEC(query_phantom) / ((double)num_queries) << " msec/query."
|
||||
<< "\n";
|
||||
|
||||
std::cout << "#### LocateClosestEndPointForCoordinate"
|
||||
<< "\n";
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
if (argc < 4)
|
||||
{
|
||||
std::cout << "./rtree-bench file.ramIndex file.fileIndx file.nodes"
|
||||
<< "\n";
|
||||
return 1;
|
||||
}
|
||||
|
||||
const char *ramPath = argv[1];
|
||||
const char *filePath = argv[2];
|
||||
const char *nodesPath = argv[3];
|
||||
|
||||
auto coords = LoadCoordinates(nodesPath);
|
||||
|
||||
BenchStaticRTree rtree(ramPath, filePath, coords);
|
||||
|
||||
Benchmark(rtree, 10000);
|
||||
|
||||
return 0;
|
||||
}
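A compact sketch of the query generation used by the benchmark above, with FixedPointCoordinate replaced by a plain lat/lon pair and COORDINATE_PRECISION assumed to be 10^6; both substitutions are assumptions made for illustration, not taken from the removed sources.

#include <cstdint>
#include <random>
#include <utility>
#include <vector>

constexpr std::int32_t PRECISION = 1000000; // assumed value of COORDINATE_PRECISION

std::vector<std::pair<std::int32_t, std::int32_t>> MakeRandomQueries(const unsigned num_queries,
                                                                     const unsigned seed = 13)
{
    std::mt19937 mt_rand(seed); // fixed seed keeps benchmark runs comparable
    std::uniform_int_distribution<std::int32_t> lat_udist(-90 * PRECISION, 90 * PRECISION);
    std::uniform_int_distribution<std::int32_t> lon_udist(-180 * PRECISION, 180 * PRECISION);

    std::vector<std::pair<std::int32_t, std::int32_t>> queries;
    queries.reserve(num_queries);
    for (unsigned i = 0; i < num_queries; ++i)
    {
        queries.emplace_back(lat_udist(mt_rand), lon_udist(mt_rand));
    }
    return queries;
}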
|
@ -1,973 +0,0 @@
|
||||
/*
|
||||
|
||||
Copyright (c) 2014, Project OSRM, Dennis Luxen, others
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice, this list
|
||||
of conditions and the following disclaimer.
|
||||
Redistributions in binary form must reproduce the above copyright notice, this
|
||||
list of conditions and the following disclaimer in the documentation and/or
|
||||
other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
*/
|
||||
|
||||
#ifndef CONTRACTOR_HPP
|
||||
#define CONTRACTOR_HPP
|
||||
|
||||
#include "../data_structures/binary_heap.hpp"
|
||||
#include "../data_structures/deallocating_vector.hpp"
|
||||
#include "../data_structures/dynamic_graph.hpp"
|
||||
#include "../data_structures/percent.hpp"
|
||||
#include "../data_structures/query_edge.hpp"
|
||||
#include "../data_structures/xor_fast_hash.hpp"
|
||||
#include "../data_structures/xor_fast_hash_storage.hpp"
|
||||
#include "../Util/integer_range.hpp"
|
||||
#include "../Util/simple_logger.hpp"
|
||||
#include "../Util/StringUtil.h"
|
||||
#include "../Util/TimingUtil.h"
|
||||
#include "../typedefs.h"
|
||||
|
||||
#include <boost/assert.hpp>
|
||||
|
||||
#include <stxxl/vector>
|
||||
|
||||
#include <tbb/enumerable_thread_specific.h>
|
||||
#include <tbb/parallel_for.h>
|
||||
#include <tbb/parallel_sort.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <limits>
|
||||
#include <vector>
|
||||
|
||||
class Contractor
|
||||
{
|
||||
|
||||
private:
|
||||
struct ContractorEdgeData
|
||||
{
|
||||
ContractorEdgeData()
|
||||
: distance(0), id(0), originalEdges(0), shortcut(0), forward(0), backward(0),
|
||||
is_original_via_node_ID(false)
|
||||
{
|
||||
}
|
||||
ContractorEdgeData(unsigned distance,
|
||||
unsigned original_edges,
|
||||
unsigned id,
|
||||
bool shortcut,
|
||||
bool forward,
|
||||
bool backward)
|
||||
: distance(distance), id(id),
|
||||
originalEdges(std::min((unsigned)1 << 28, original_edges)), shortcut(shortcut),
|
||||
forward(forward), backward(backward), is_original_via_node_ID(false)
|
||||
{
|
||||
}
|
||||
unsigned distance;
|
||||
unsigned id;
|
||||
unsigned originalEdges : 28;
|
||||
bool shortcut : 1;
|
||||
bool forward : 1;
|
||||
bool backward : 1;
|
||||
bool is_original_via_node_ID : 1;
|
||||
} data;
|
||||
|
||||
struct ContractorHeapData
|
||||
{
|
||||
short hop;
|
||||
bool target;
|
||||
ContractorHeapData() : hop(0), target(false) {}
|
||||
ContractorHeapData(short h, bool t) : hop(h), target(t) {}
|
||||
};
|
||||
|
||||
using ContractorGraph = DynamicGraph<ContractorEdgeData>;
|
||||
// using ContractorHeap = BinaryHeap<NodeID, NodeID, int, ContractorHeapData, ArrayStorage<NodeID, NodeID>
|
||||
// >;
|
||||
using ContractorHeap = BinaryHeap<NodeID, NodeID, int, ContractorHeapData, XORFastHashStorage<NodeID, NodeID>>;
|
||||
using ContractorEdge = ContractorGraph::InputEdge;
|
||||
|
||||
struct ContractorThreadData
|
||||
{
|
||||
ContractorHeap heap;
|
||||
std::vector<ContractorEdge> inserted_edges;
|
||||
std::vector<NodeID> neighbours;
|
||||
explicit ContractorThreadData(NodeID nodes) : heap(nodes) {}
|
||||
};
|
||||
|
||||
struct NodePriorityData
|
||||
{
|
||||
int depth;
|
||||
NodePriorityData() : depth(0) {}
|
||||
};
|
||||
|
||||
struct ContractionStats
|
||||
{
|
||||
int edges_deleted_count;
|
||||
int edges_added_count;
|
||||
int original_edges_deleted_count;
|
||||
int original_edges_added_count;
|
||||
ContractionStats()
|
||||
: edges_deleted_count(0), edges_added_count(0), original_edges_deleted_count(0),
|
||||
original_edges_added_count(0)
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
struct RemainingNodeData
|
||||
{
|
||||
RemainingNodeData() : id(0), is_independent(false) {}
|
||||
NodeID id : 31;
|
||||
bool is_independent : 1;
|
||||
};
|
||||
|
||||
|
||||
struct ThreadDataContainer
|
||||
{
|
||||
explicit ThreadDataContainer(int number_of_nodes) : number_of_nodes(number_of_nodes) {}
|
||||
|
||||
inline ContractorThreadData* getThreadData()
|
||||
{
|
||||
bool exists = false;
|
||||
auto& ref = data.local(exists);
|
||||
if (!exists)
|
||||
{
|
||||
ref = std::make_shared<ContractorThreadData>(number_of_nodes);
|
||||
}
|
||||
|
||||
return ref.get();
|
||||
}
|
||||
|
||||
int number_of_nodes;
|
||||
using EnumerableThreadData = tbb::enumerable_thread_specific<std::shared_ptr<ContractorThreadData>>;
|
||||
EnumerableThreadData data;
|
||||
};
|
||||
|
||||
public:
|
||||
template <class ContainerT> Contractor(int nodes, ContainerT &input_edge_list)
|
||||
{
|
||||
std::vector<ContractorEdge> edges;
|
||||
edges.reserve(input_edge_list.size() * 2);
|
||||
|
||||
const auto dend = input_edge_list.dend();
|
||||
for (auto diter = input_edge_list.dbegin(); diter != dend; ++diter)
|
||||
{
|
||||
BOOST_ASSERT_MSG(static_cast<unsigned int>(std::max(diter->weight, 1)) > 0, "edge distance < 1");
|
||||
#ifndef NDEBUG
|
||||
if (static_cast<unsigned int>(std::max(diter->weight, 1)) > 24 * 60 * 60 * 10)
|
||||
{
|
||||
SimpleLogger().Write(logWARNING) << "Edge weight large -> "
|
||||
<< static_cast<unsigned int>(std::max(diter->weight, 1));
|
||||
}
|
||||
#endif
|
||||
edges.emplace_back(diter->source, diter->target,
|
||||
static_cast<unsigned int>(std::max(diter->weight, 1)),
|
||||
1,
|
||||
diter->edge_id,
|
||||
false,
|
||||
diter->forward ? true : false,
|
||||
diter->backward ? true : false);
|
||||
|
||||
edges.emplace_back(diter->target, diter->source,
|
||||
static_cast<unsigned int>(std::max(diter->weight, 1)),
|
||||
1,
|
||||
diter->edge_id,
|
||||
false,
|
||||
diter->backward ? true : false,
|
||||
diter->forward ? true : false);
|
||||
}
|
||||
// clear input vector
|
||||
input_edge_list.clear();
|
||||
edges.shrink_to_fit();
|
||||
|
||||
tbb::parallel_sort(edges.begin(), edges.end());
|
||||
NodeID edge = 0;
|
||||
for (NodeID i = 0; i < edges.size();)
|
||||
{
|
||||
const NodeID source = edges[i].source;
|
||||
const NodeID target = edges[i].target;
|
||||
const NodeID id = edges[i].data.id;
|
||||
// remove self-loops
|
||||
if (source == target)
|
||||
{
|
||||
++i;
|
||||
continue;
|
||||
}
|
||||
ContractorEdge forward_edge;
|
||||
ContractorEdge reverse_edge;
|
||||
forward_edge.source = reverse_edge.source = source;
|
||||
forward_edge.target = reverse_edge.target = target;
|
||||
forward_edge.data.forward = reverse_edge.data.backward = true;
|
||||
forward_edge.data.backward = reverse_edge.data.forward = false;
|
||||
forward_edge.data.shortcut = reverse_edge.data.shortcut = false;
|
||||
forward_edge.data.id = reverse_edge.data.id = id;
|
||||
forward_edge.data.originalEdges = reverse_edge.data.originalEdges = 1;
|
||||
forward_edge.data.distance = reverse_edge.data.distance =
|
||||
std::numeric_limits<int>::max();
|
||||
// remove parallel edges
|
||||
while (i < edges.size() && edges[i].source == source && edges[i].target == target)
|
||||
{
|
||||
if (edges[i].data.forward)
|
||||
{
|
||||
forward_edge.data.distance =
|
||||
std::min(edges[i].data.distance, forward_edge.data.distance);
|
||||
}
|
||||
if (edges[i].data.backward)
|
||||
{
|
||||
reverse_edge.data.distance =
|
||||
std::min(edges[i].data.distance, reverse_edge.data.distance);
|
||||
}
|
||||
++i;
|
||||
}
|
||||
// merge edges (s,t) and (t,s) into bidirectional edge
|
||||
if (forward_edge.data.distance == reverse_edge.data.distance)
|
||||
{
|
||||
if ((int)forward_edge.data.distance != std::numeric_limits<int>::max())
|
||||
{
|
||||
forward_edge.data.backward = true;
|
||||
edges[edge++] = forward_edge;
|
||||
}
|
||||
}
|
||||
else
|
||||
{ // insert separate edges
|
||||
if (((int)forward_edge.data.distance) != std::numeric_limits<int>::max())
|
||||
{
|
||||
edges[edge++] = forward_edge;
|
||||
}
|
||||
if ((int)reverse_edge.data.distance != std::numeric_limits<int>::max())
|
||||
{
|
||||
edges[edge++] = reverse_edge;
|
||||
}
|
||||
}
|
||||
}
|
||||
std::cout << "merged " << edges.size() - edge << " edges out of " << edges.size()
|
||||
<< std::endl;
|
||||
edges.resize(edge);
|
||||
contractor_graph = std::make_shared<ContractorGraph>(nodes, edges);
|
||||
edges.clear();
|
||||
edges.shrink_to_fit();
|
||||
|
||||
BOOST_ASSERT(0 == edges.capacity());
|
||||
// unsigned maxdegree = 0;
|
||||
// NodeID highestNode = 0;
|
||||
//
|
||||
// for(unsigned i = 0; i < contractor_graph->GetNumberOfNodes(); ++i) {
|
||||
// unsigned degree = contractor_graph->EndEdges(i) -
|
||||
// contractor_graph->BeginEdges(i);
|
||||
// if(degree > maxdegree) {
|
||||
// maxdegree = degree;
|
||||
// highestNode = i;
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// SimpleLogger().Write() << "edges at node with id " << highestNode << " has degree
|
||||
// " << maxdegree;
|
||||
// for(unsigned i = contractor_graph->BeginEdges(highestNode); i <
|
||||
// contractor_graph->EndEdges(highestNode); ++i) {
|
||||
// SimpleLogger().Write() << " ->(" << highestNode << "," <<
|
||||
// contractor_graph->GetTarget(i)
|
||||
// << "); via: " << contractor_graph->GetEdgeData(i).via;
|
||||
// }
|
||||
|
||||
std::cout << "contractor finished initalization" << std::endl;
|
||||
}
|
||||
|
||||
~Contractor() { }
|
||||
|
||||
void Run()
|
||||
{
|
||||
// for the preparation we can use a big grain size, which is much faster (probably due to caching)
|
||||
constexpr size_t InitGrainSize = 100000;
|
||||
constexpr size_t PQGrainSize = 100000;
|
||||
// auto_partitioner will automatically increase the blocksize if we have
|
||||
// a lot of data. It is *important* for the last loop iterations
|
||||
// (which have a very small dataset) that it is divisible.
|
||||
constexpr size_t IndependentGrainSize = 1;
|
||||
constexpr size_t ContractGrainSize = 1;
|
||||
constexpr size_t NeighboursGrainSize = 1;
|
||||
constexpr size_t DeleteGrainSize = 1;
|
||||
|
||||
const NodeID number_of_nodes = contractor_graph->GetNumberOfNodes();
|
||||
Percent p(number_of_nodes);
|
||||
|
||||
ThreadDataContainer thread_data_list(number_of_nodes);
|
||||
|
||||
NodeID number_of_contracted_nodes = 0;
|
||||
std::vector<RemainingNodeData> remaining_nodes(number_of_nodes);
|
||||
std::vector<float> node_priorities(number_of_nodes);
|
||||
std::vector<NodePriorityData> node_data(number_of_nodes);
|
||||
|
||||
|
||||
// initialize priorities in parallel
|
||||
tbb::parallel_for(tbb::blocked_range<int>(0, number_of_nodes, InitGrainSize),
|
||||
[&remaining_nodes](const tbb::blocked_range<int>& range)
|
||||
{
|
||||
for (int x = range.begin(); x != range.end(); ++x)
|
||||
{
|
||||
remaining_nodes[x].id = x;
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
|
||||
std::cout << "initializing elimination PQ ..." << std::flush;
|
||||
tbb::parallel_for(tbb::blocked_range<int>(0, number_of_nodes, PQGrainSize),
|
||||
[this, &node_priorities, &node_data, &thread_data_list](const tbb::blocked_range<int>& range)
|
||||
{
|
||||
ContractorThreadData *data = thread_data_list.getThreadData();
|
||||
for (int x = range.begin(); x != range.end(); ++x)
|
||||
{
|
||||
node_priorities[x] = this->EvaluateNodePriority(data, &node_data[x], x);
|
||||
}
|
||||
}
|
||||
);
|
||||
std::cout << "ok" << std::endl << "preprocessing " << number_of_nodes << " nodes ..."
|
||||
<< std::flush;
|
||||
|
||||
bool flushed_contractor = false;
|
||||
while (number_of_nodes > 2 && number_of_contracted_nodes < number_of_nodes)
|
||||
{
|
||||
if (!flushed_contractor && (number_of_contracted_nodes > (number_of_nodes * 0.65)))
|
||||
{
|
||||
DeallocatingVector<ContractorEdge> new_edge_set; // this one is not explicitly
|
||||
// cleared since it goes out of
|
||||
// scope anyway
|
||||
std::cout << " [flush " << number_of_contracted_nodes << " nodes] " << std::flush;
|
||||
|
||||
// Delete old heap data to free memory that we need for the coming operations
|
||||
thread_data_list.data.clear();
|
||||
|
||||
// Create new priority array
|
||||
std::vector<float> new_node_priority(remaining_nodes.size());
|
||||
// this map gives the old IDs from the new ones, necessary to get a consistent graph
|
||||
// at the end of contraction
|
||||
orig_node_id_to_new_id_map.resize(remaining_nodes.size());
|
||||
// this map gives the new IDs from the old ones, necessary to remap targets from the
|
||||
// remaining graph
|
||||
std::vector<NodeID> new_node_id_from_orig_id_map(number_of_nodes, UINT_MAX);
|
||||
|
||||
// build forward and backward renumbering map and remap ids in remaining_nodes and
|
||||
// Priorities.
|
||||
for (const auto new_node_id : osrm::irange<std::size_t>(0, remaining_nodes.size()))
|
||||
{
|
||||
// create renumbering maps in both directions
|
||||
orig_node_id_to_new_id_map[new_node_id] = remaining_nodes[new_node_id].id;
|
||||
new_node_id_from_orig_id_map[remaining_nodes[new_node_id].id] = new_node_id;
|
||||
new_node_priority[new_node_id] =
|
||||
node_priorities[remaining_nodes[new_node_id].id];
|
||||
remaining_nodes[new_node_id].id = new_node_id;
|
||||
}
|
||||
// walk over all nodes
|
||||
for (const auto i : osrm::irange<std::size_t>(0, contractor_graph->GetNumberOfNodes()))
|
||||
{
|
||||
const NodeID source = i;
|
||||
for (auto current_edge : contractor_graph->GetAdjacentEdgeRange(source))
|
||||
{
|
||||
ContractorGraph::EdgeData &data =
|
||||
contractor_graph->GetEdgeData(current_edge);
|
||||
const NodeID target = contractor_graph->GetTarget(current_edge);
|
||||
if (SPECIAL_NODEID == new_node_id_from_orig_id_map[i])
|
||||
{
|
||||
external_edge_list.push_back({source, target, data});
|
||||
}
|
||||
else
|
||||
{
|
||||
// node is not yet contracted.
|
||||
// add (renumbered) outgoing edges to new DynamicGraph.
|
||||
ContractorEdge new_edge = {
|
||||
new_node_id_from_orig_id_map[source],
|
||||
new_node_id_from_orig_id_map[target],
|
||||
data
|
||||
};
|
||||
|
||||
new_edge.data.is_original_via_node_ID = true;
|
||||
BOOST_ASSERT_MSG(UINT_MAX != new_node_id_from_orig_id_map[source],
|
||||
"new source id not resolveable");
|
||||
BOOST_ASSERT_MSG(UINT_MAX != new_node_id_from_orig_id_map[target],
|
||||
"new target id not resolveable");
|
||||
new_edge_set.push_back(new_edge);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Delete map from old NodeIDs to new ones.
|
||||
new_node_id_from_orig_id_map.clear();
|
||||
new_node_id_from_orig_id_map.shrink_to_fit();
|
||||
|
||||
// Replace old priorities array by new one
|
||||
node_priorities.swap(new_node_priority);
|
||||
// Delete old node_priorities vector
|
||||
new_node_priority.clear();
|
||||
new_node_priority.shrink_to_fit();
|
||||
// old Graph is removed
|
||||
contractor_graph.reset();
|
||||
|
||||
// create new graph
|
||||
std::sort(new_edge_set.begin(), new_edge_set.end());
|
||||
contractor_graph =
|
||||
std::make_shared<ContractorGraph>(remaining_nodes.size(), new_edge_set);
|
||||
|
||||
new_edge_set.clear();
|
||||
flushed_contractor = true;
|
||||
|
||||
// INFO: MAKE SURE THIS IS THE LAST OPERATION OF THE FLUSH!
|
||||
// reinitialize heaps and ThreadData objects with appropriate size
|
||||
thread_data_list.number_of_nodes = contractor_graph->GetNumberOfNodes();
|
||||
}
|
||||
|
||||
const int last = (int)remaining_nodes.size();
|
||||
tbb::parallel_for(tbb::blocked_range<int>(0, last, IndependentGrainSize),
|
||||
[this, &node_priorities, &remaining_nodes, &thread_data_list](const tbb::blocked_range<int>& range)
|
||||
{
|
||||
ContractorThreadData *data = thread_data_list.getThreadData();
|
||||
// determine independent node set
|
||||
for (int i = range.begin(); i != range.end(); ++i)
|
||||
{
|
||||
const NodeID node = remaining_nodes[i].id;
|
||||
remaining_nodes[i].is_independent =
|
||||
this->IsNodeIndependent(node_priorities, data, node);
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
const auto first = stable_partition(remaining_nodes.begin(),
|
||||
remaining_nodes.end(),
|
||||
[](RemainingNodeData node_data)
|
||||
{ return !node_data.is_independent; });
|
||||
const int first_independent_node = static_cast<int>(first - remaining_nodes.begin());
|
||||
|
||||
// contract independent nodes
|
||||
tbb::parallel_for(tbb::blocked_range<int>(first_independent_node, last, ContractGrainSize),
|
||||
[this, &remaining_nodes, &thread_data_list](const tbb::blocked_range<int>& range)
|
||||
{
|
||||
ContractorThreadData *data = thread_data_list.getThreadData();
|
||||
for (int position = range.begin(); position != range.end(); ++position)
|
||||
{
|
||||
const NodeID x = remaining_nodes[position].id;
|
||||
this->ContractNode<false>(data, x);
|
||||
}
|
||||
}
|
||||
);
|
||||
// make sure we really sort each block
|
||||
tbb::parallel_for(thread_data_list.data.range(),
|
||||
[&](const ThreadDataContainer::EnumerableThreadData::range_type& range)
|
||||
{
|
||||
for (auto& data : range)
|
||||
std::sort(data->inserted_edges.begin(),
|
||||
data->inserted_edges.end());
|
||||
}
|
||||
);
|
||||
tbb::parallel_for(tbb::blocked_range<int>(first_independent_node, last, DeleteGrainSize),
|
||||
[this, &remaining_nodes, &thread_data_list](const tbb::blocked_range<int>& range)
|
||||
{
|
||||
ContractorThreadData *data = thread_data_list.getThreadData();
|
||||
for (int position = range.begin(); position != range.end(); ++position)
|
||||
{
|
||||
const NodeID x = remaining_nodes[position].id;
|
||||
this->DeleteIncomingEdges(data, x);
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
// insert new edges
|
||||
for (auto& data : thread_data_list.data)
|
||||
{
|
||||
for (const ContractorEdge &edge : data->inserted_edges)
|
||||
{
|
||||
const EdgeID current_edge_ID = contractor_graph->FindEdge(edge.source, edge.target);
|
||||
if (current_edge_ID < contractor_graph->EndEdges(edge.source))
|
||||
{
|
||||
ContractorGraph::EdgeData ¤t_data =
|
||||
contractor_graph->GetEdgeData(current_edge_ID);
|
||||
if (current_data.shortcut && edge.data.forward == current_data.forward &&
|
||||
edge.data.backward == current_data.backward &&
|
||||
edge.data.distance < current_data.distance)
|
||||
{
|
||||
// found a duplicate edge with smaller weight, update it.
|
||||
current_data = edge.data;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
contractor_graph->InsertEdge(edge.source, edge.target, edge.data);
|
||||
}
|
||||
data->inserted_edges.clear();
|
||||
}
|
||||
|
||||
tbb::parallel_for(tbb::blocked_range<int>(first_independent_node, last, NeighboursGrainSize),
|
||||
[this, &remaining_nodes, &node_priorities, &node_data, &thread_data_list](const tbb::blocked_range<int>& range)
|
||||
{
|
||||
ContractorThreadData *data = thread_data_list.getThreadData();
|
||||
for (int position = range.begin(); position != range.end(); ++position)
|
||||
{
|
||||
NodeID x = remaining_nodes[position].id;
|
||||
this->UpdateNodeNeighbours(node_priorities, node_data, data, x);
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
// remove contracted nodes from the pool
|
||||
number_of_contracted_nodes += last - first_independent_node;
|
||||
remaining_nodes.resize(first_independent_node);
|
||||
remaining_nodes.shrink_to_fit();
|
||||
// unsigned maxdegree = 0;
|
||||
// unsigned avgdegree = 0;
|
||||
// unsigned mindegree = UINT_MAX;
|
||||
// unsigned quaddegree = 0;
|
||||
//
|
||||
// for(unsigned i = 0; i < remaining_nodes.size(); ++i) {
|
||||
// unsigned degree = contractor_graph->EndEdges(remaining_nodes[i].first)
|
||||
// -
|
||||
// contractor_graph->BeginEdges(remaining_nodes[i].first);
|
||||
// if(degree > maxdegree)
|
||||
// maxdegree = degree;
|
||||
// if(degree < mindegree)
|
||||
// mindegree = degree;
|
||||
//
|
||||
// avgdegree += degree;
|
||||
// quaddegree += (degree*degree);
|
||||
// }
|
||||
//
|
||||
// avgdegree /= std::max((unsigned)1,(unsigned)remaining_nodes.size() );
|
||||
// quaddegree /= std::max((unsigned)1,(unsigned)remaining_nodes.size() );
|
||||
//
|
||||
// SimpleLogger().Write() << "rest: " << remaining_nodes.size() << ", max: "
|
||||
// << maxdegree << ", min: " << mindegree << ", avg: " << avgdegree << ",
|
||||
// quad: " << quaddegree;
|
||||
|
||||
p.printStatus(number_of_contracted_nodes);
|
||||
}
|
||||
|
||||
thread_data_list.data.clear();
|
||||
}
|
||||
|
||||
template <class Edge> inline void GetEdges(DeallocatingVector<Edge> &edges)
|
||||
{
|
||||
Percent p(contractor_graph->GetNumberOfNodes());
|
||||
SimpleLogger().Write() << "Getting edges of minimized graph";
|
||||
const NodeID number_of_nodes = contractor_graph->GetNumberOfNodes();
|
||||
if (contractor_graph->GetNumberOfNodes())
|
||||
{
|
||||
Edge new_edge;
|
||||
for (const auto node : osrm::irange(0u, number_of_nodes))
|
||||
{
|
||||
p.printStatus(node);
|
||||
for (auto edge : contractor_graph->GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const NodeID target = contractor_graph->GetTarget(edge);
|
||||
const ContractorGraph::EdgeData &data = contractor_graph->GetEdgeData(edge);
|
||||
if (!orig_node_id_to_new_id_map.empty())
|
||||
{
|
||||
new_edge.source = orig_node_id_to_new_id_map[node];
|
||||
new_edge.target = orig_node_id_to_new_id_map[target];
|
||||
}
|
||||
else
|
||||
{
|
||||
new_edge.source = node;
|
||||
new_edge.target = target;
|
||||
}
|
||||
BOOST_ASSERT_MSG(UINT_MAX != new_edge.source, "Source id invalid");
|
||||
BOOST_ASSERT_MSG(UINT_MAX != new_edge.target, "Target id invalid");
|
||||
new_edge.data.distance = data.distance;
|
||||
new_edge.data.shortcut = data.shortcut;
|
||||
if (!data.is_original_via_node_ID && !orig_node_id_to_new_id_map.empty())
|
||||
{
|
||||
new_edge.data.id = orig_node_id_to_new_id_map[data.id];
|
||||
}
|
||||
else
|
||||
{
|
||||
new_edge.data.id = data.id;
|
||||
}
|
||||
BOOST_ASSERT_MSG(new_edge.data.id != INT_MAX, // 2^31
|
||||
"edge id invalid");
|
||||
new_edge.data.forward = data.forward;
|
||||
new_edge.data.backward = data.backward;
|
||||
edges.push_back(new_edge);
|
||||
}
|
||||
}
|
||||
}
|
||||
contractor_graph.reset();
|
||||
orig_node_id_to_new_id_map.clear();
|
||||
orig_node_id_to_new_id_map.shrink_to_fit();
|
||||
|
||||
BOOST_ASSERT(0 == orig_node_id_to_new_id_map.capacity());
|
||||
|
||||
edges.append(external_edge_list.begin(), external_edge_list.end());
|
||||
external_edge_list.clear();
|
||||
}
|
||||
|
||||
private:
|
||||
inline void Dijkstra(const int max_distance,
|
||||
const unsigned number_of_targets,
|
||||
const int maxNodes,
|
||||
ContractorThreadData *const data,
|
||||
const NodeID middleNode)
|
||||
{
|
||||
|
||||
ContractorHeap &heap = data->heap;
|
||||
|
||||
int nodes = 0;
|
||||
unsigned number_of_targets_found = 0;
|
||||
while (!heap.Empty())
|
||||
{
|
||||
const NodeID node = heap.DeleteMin();
|
||||
const int distance = heap.GetKey(node);
|
||||
const short current_hop = heap.GetData(node).hop + 1;
|
||||
|
||||
if (++nodes > maxNodes)
|
||||
{
|
||||
return;
|
||||
}
|
||||
if (distance > max_distance)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
// Destination settled?
|
||||
if (heap.GetData(node).target)
|
||||
{
|
||||
++number_of_targets_found;
|
||||
if (number_of_targets_found >= number_of_targets)
|
||||
{
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// iterate over all edges of node
|
||||
for (auto edge : contractor_graph->GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const ContractorEdgeData &data = contractor_graph->GetEdgeData(edge);
|
||||
if (!data.forward)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
const NodeID to = contractor_graph->GetTarget(edge);
|
||||
if (middleNode == to)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
const int to_distance = distance + data.distance;
|
||||
|
||||
// New Node discovered -> Add to Heap + Node Info Storage
|
||||
if (!heap.WasInserted(to))
|
||||
{
|
||||
heap.Insert(to, to_distance, ContractorHeapData(current_hop, false));
|
||||
}
|
||||
// Found a shorter Path -> Update distance
|
||||
else if (to_distance < heap.GetKey(to))
|
||||
{
|
||||
heap.DecreaseKey(to, to_distance);
|
||||
heap.GetData(to).hop = current_hop;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
inline float EvaluateNodePriority(ContractorThreadData *const data,
|
||||
NodePriorityData *const node_data,
|
||||
const NodeID node)
|
||||
{
|
||||
ContractionStats stats;
|
||||
|
||||
// perform simulated contraction
|
||||
ContractNode<true>(data, node, &stats);
|
||||
|
||||
// Result will contain the priority
|
||||
float result;
|
||||
if (0 == (stats.edges_deleted_count * stats.original_edges_deleted_count))
|
||||
{
|
||||
result = 1.f * node_data->depth;
|
||||
}
|
||||
else
|
||||
{
|
||||
result = 2.f * (((float)stats.edges_added_count) / stats.edges_deleted_count) +
|
||||
4.f * (((float)stats.original_edges_added_count) /
|
||||
stats.original_edges_deleted_count) +
|
||||
1.f * node_data->depth;
|
||||
}
|
||||
BOOST_ASSERT(result >= 0);
|
||||
return result;
|
||||
}
|
||||
|
||||
template <bool RUNSIMULATION>
|
||||
inline bool
|
||||
ContractNode(ContractorThreadData *data, const NodeID node, ContractionStats *stats = nullptr)
|
||||
{
|
||||
ContractorHeap &heap = data->heap;
|
||||
int inserted_edges_size = data->inserted_edges.size();
|
||||
std::vector<ContractorEdge> &inserted_edges = data->inserted_edges;
|
||||
|
||||
for (auto in_edge : contractor_graph->GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const ContractorEdgeData &in_data = contractor_graph->GetEdgeData(in_edge);
|
||||
const NodeID source = contractor_graph->GetTarget(in_edge);
|
||||
if (RUNSIMULATION)
|
||||
{
|
||||
BOOST_ASSERT(stats != nullptr);
|
||||
++stats->edges_deleted_count;
|
||||
stats->original_edges_deleted_count += in_data.originalEdges;
|
||||
}
|
||||
if (!in_data.backward)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
heap.Clear();
|
||||
heap.Insert(source, 0, ContractorHeapData());
|
||||
int max_distance = 0;
|
||||
unsigned number_of_targets = 0;
|
||||
|
||||
for (auto out_edge : contractor_graph->GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const ContractorEdgeData &out_data = contractor_graph->GetEdgeData(out_edge);
|
||||
if (!out_data.forward)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
const NodeID target = contractor_graph->GetTarget(out_edge);
|
||||
const int path_distance = in_data.distance + out_data.distance;
|
||||
max_distance = std::max(max_distance, path_distance);
|
||||
if (!heap.WasInserted(target))
|
||||
{
|
||||
heap.Insert(target, INT_MAX, ContractorHeapData(0, true));
|
||||
++number_of_targets;
|
||||
}
|
||||
}
|
||||
|
||||
if (RUNSIMULATION)
|
||||
{
|
||||
Dijkstra(max_distance, number_of_targets, 1000, data, node);
|
||||
}
|
||||
else
|
||||
{
|
||||
Dijkstra(max_distance, number_of_targets, 2000, data, node);
|
||||
}
|
||||
for (auto out_edge : contractor_graph->GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const ContractorEdgeData &out_data = contractor_graph->GetEdgeData(out_edge);
|
||||
if (!out_data.forward)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
const NodeID target = contractor_graph->GetTarget(out_edge);
|
||||
const int path_distance = in_data.distance + out_data.distance;
|
||||
const int distance = heap.GetKey(target);
|
||||
if (path_distance < distance)
|
||||
{
|
||||
if (RUNSIMULATION)
|
||||
{
|
||||
BOOST_ASSERT(stats != nullptr);
|
||||
stats->edges_added_count += 2;
|
||||
stats->original_edges_added_count +=
|
||||
2 * (out_data.originalEdges + in_data.originalEdges);
|
||||
}
|
||||
else
|
||||
{
|
||||
inserted_edges.emplace_back(source, target, path_distance,
|
||||
out_data.originalEdges + in_data.originalEdges,
|
||||
node,
|
||||
true,
|
||||
true,
|
||||
false);
|
||||
|
||||
inserted_edges.emplace_back(target, source, path_distance,
|
||||
out_data.originalEdges + in_data.originalEdges,
|
||||
node,
|
||||
true,
|
||||
false,
|
||||
true);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!RUNSIMULATION)
|
||||
{
|
||||
int iend = inserted_edges.size();
|
||||
for (int i = inserted_edges_size; i < iend; ++i)
|
||||
{
|
||||
bool found = false;
|
||||
for (int other = i + 1; other < iend; ++other)
|
||||
{
|
||||
if (inserted_edges[other].source != inserted_edges[i].source)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
if (inserted_edges[other].target != inserted_edges[i].target)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
if (inserted_edges[other].data.distance != inserted_edges[i].data.distance)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
if (inserted_edges[other].data.shortcut != inserted_edges[i].data.shortcut)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
inserted_edges[other].data.forward |= inserted_edges[i].data.forward;
|
||||
inserted_edges[other].data.backward |= inserted_edges[i].data.backward;
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
if (!found)
|
||||
{
|
||||
inserted_edges[inserted_edges_size++] = inserted_edges[i];
|
||||
}
|
||||
}
|
||||
inserted_edges.resize(inserted_edges_size);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
inline void DeleteIncomingEdges(ContractorThreadData *data, const NodeID node)
|
||||
{
|
||||
std::vector<NodeID> &neighbours = data->neighbours;
|
||||
neighbours.clear();
|
||||
|
||||
// find all neighbours
|
||||
for (auto e : contractor_graph->GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const NodeID u = contractor_graph->GetTarget(e);
|
||||
if (u != node)
|
||||
{
|
||||
neighbours.push_back(u);
|
||||
}
|
||||
}
|
||||
// eliminate duplicate entries ( forward + backward edges )
|
||||
std::sort(neighbours.begin(), neighbours.end());
|
||||
neighbours.resize(std::unique(neighbours.begin(), neighbours.end()) - neighbours.begin());
|
||||
|
||||
for (const auto i : osrm::irange<std::size_t>(0, neighbours.size()))
|
||||
{
|
||||
contractor_graph->DeleteEdgesTo(neighbours[i], node);
|
||||
}
|
||||
}
|
||||
|
||||
inline bool UpdateNodeNeighbours(std::vector<float> &priorities,
|
||||
std::vector<NodePriorityData> &node_data,
|
||||
ContractorThreadData *const data,
|
||||
const NodeID node)
|
||||
{
|
||||
std::vector<NodeID> &neighbours = data->neighbours;
|
||||
neighbours.clear();
|
||||
|
||||
// find all neighbours
|
||||
for (auto e : contractor_graph->GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const NodeID u = contractor_graph->GetTarget(e);
|
||||
if (u == node)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
neighbours.push_back(u);
|
||||
node_data[u].depth = (std::max)(node_data[node].depth + 1, node_data[u].depth);
|
||||
}
|
||||
// eliminate duplicate entries ( forward + backward edges )
|
||||
std::sort(neighbours.begin(), neighbours.end());
|
||||
neighbours.resize(std::unique(neighbours.begin(), neighbours.end()) - neighbours.begin());
|
||||
|
||||
// re-evaluate priorities of neighboring nodes
|
||||
for (const NodeID u : neighbours)
|
||||
{
|
||||
priorities[u] = EvaluateNodePriority(data, &(node_data)[u], u);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
inline bool IsNodeIndependent(
|
||||
const std::vector<float> &priorities,
|
||||
ContractorThreadData *const data,
|
||||
NodeID node) const
|
||||
{
|
||||
const float priority = priorities[node];
|
||||
|
||||
std::vector<NodeID> &neighbours = data->neighbours;
|
||||
neighbours.clear();
|
||||
|
||||
for (auto e : contractor_graph->GetAdjacentEdgeRange(node))
|
||||
{
|
||||
const NodeID target = contractor_graph->GetTarget(e);
|
||||
if (node == target)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
const float target_priority = priorities[target];
|
||||
BOOST_ASSERT(target_priority >= 0);
|
||||
// found a neighbour with lower priority?
|
||||
if (priority > target_priority)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// tie breaking
|
||||
if (std::abs(priority - target_priority) < std::numeric_limits<float>::epsilon() &&
|
||||
bias(node, target))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
neighbours.push_back(target);
|
||||
}
|
||||
|
||||
std::sort(neighbours.begin(), neighbours.end());
|
||||
neighbours.resize(std::unique(neighbours.begin(), neighbours.end()) - neighbours.begin());
|
||||
|
||||
// examine all neighbours that are at most 2 hops away
|
||||
for (const NodeID u : neighbours)
|
||||
{
|
||||
for (auto e : contractor_graph->GetAdjacentEdgeRange(u))
|
||||
{
|
||||
const NodeID target = contractor_graph->GetTarget(e);
|
||||
if (node == target)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
const float target_priority = priorities[target];
|
||||
BOOST_ASSERT(target_priority >= 0);
|
||||
// found a neighbour with lower priority?
|
||||
if (priority > target_priority)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// tie breaking
|
||||
if (std::abs(priority - target_priority) < std::numeric_limits<float>::epsilon() &&
|
||||
bias(node, target))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// This bias function takes up 22 assembly instructions in total on X86
|
||||
inline bool bias(const NodeID a, const NodeID b) const
|
||||
{
|
||||
const unsigned short hasha = fast_hash(a);
|
||||
const unsigned short hashb = fast_hash(b);
|
||||
|
||||
// The compiler optimizes that to conditional register flags but without branching
|
||||
// statements!
|
||||
if (hasha != hashb)
|
||||
{
|
||||
return hasha < hashb;
|
||||
}
|
||||
return a < b;
|
||||
}
|
||||
|
||||
std::shared_ptr<ContractorGraph> contractor_graph;
|
||||
std::vector<ContractorGraph::InputEdge> contracted_edge_list;
|
||||
stxxl::vector<QueryEdge> external_edge_list;
|
||||
std::vector<NodeID> orig_node_id_to_new_id_map;
|
||||
XORFastHash fast_hash;
|
||||
};
|
||||
|
||||
#endif // CONTRACTOR_HPP
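The priority formula buried in EvaluateNodePriority above is the heart of the node ordering. Here it is pulled out as a free function for clarity; this is a sketch only, the struct mirrors ContractionStats, and the 2/4/1 weights are those used in the hunk.

#include <cassert>

struct SimulatedContraction // mirrors ContractionStats
{
    int edges_added_count = 0;
    int edges_deleted_count = 0;
    int original_edges_added_count = 0;
    int original_edges_deleted_count = 0;
};

float NodePriority(const SimulatedContraction &stats, const int depth)
{
    // nothing was deleted in the simulated contraction: only the depth term remains
    if (0 == (stats.edges_deleted_count * stats.original_edges_deleted_count))
    {
        return 1.f * depth;
    }
    // weighted sum of the edge quotient (2x), the original-edge quotient (4x)
    // and the search-tree depth (1x)
    const float result =
        2.f * (static_cast<float>(stats.edges_added_count) / stats.edges_deleted_count) +
        4.f * (static_cast<float>(stats.original_edges_added_count) /
               stats.original_edges_deleted_count) +
        1.f * depth;
    assert(result >= 0);
    return result;
}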
|
@ -1,263 +0,0 @@
|
||||
/*
|
||||
|
||||
Copyright (c) 2014, Project OSRM, Dennis Luxen, others
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice, this list
|
||||
of conditions and the following disclaimer.
|
||||
Redistributions in binary form must reproduce the above copyright notice, this
|
||||
list of conditions and the following disclaimer in the documentation and/or
|
||||
other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
*/
|
||||
|
||||
#include "extractor.hpp"
|
||||
|
||||
#include "extraction_containers.hpp"
|
||||
#include "extraction_node.hpp"
|
||||
#include "extraction_way.hpp"
|
||||
#include "extractor_callbacks.hpp"
|
||||
#include "extractor_options.hpp"
|
||||
#include "restriction_parser.hpp"
|
||||
#include "scripting_environment.hpp"
|
||||
|
||||
#include "../Util/GitDescription.h"
|
||||
#include "../Util/IniFileUtil.h"
|
||||
#include "../Util/OSRMException.h"
|
||||
#include "../Util/simple_logger.hpp"
|
||||
#include "../Util/TimingUtil.h"
|
||||
#include "../Util/make_unique.hpp"
|
||||
|
||||
#include "../typedefs.h"
|
||||
|
||||
#include <luabind/luabind.hpp>
|
||||
|
||||
#include <osmium/io/any_input.hpp>
|
||||
|
||||
#include <tbb/parallel_for.h>
|
||||
#include <tbb/task_scheduler_init.h>
|
||||
|
||||
#include <variant/optional.hpp>
|
||||
|
||||
#include <cstdlib>
|
||||
|
||||
#include <algorithm>
|
||||
#include <atomic>
|
||||
#include <chrono>
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <thread>
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
|
||||
int Extractor::Run(int argc, char *argv[])
|
||||
{
|
||||
ExtractorConfig extractor_config;
|
||||
|
||||
try
|
||||
{
|
||||
LogPolicy::GetInstance().Unmute();
|
||||
TIMER_START(extracting);
|
||||
|
||||
if (!ExtractorOptions::ParseArguments(argc, argv, extractor_config))
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
ExtractorOptions::GenerateOutputFilesNames(extractor_config);
|
||||
|
||||
if (1 > extractor_config.requested_num_threads)
|
||||
{
|
||||
SimpleLogger().Write(logWARNING) << "Number of threads must be 1 or larger";
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (!boost::filesystem::is_regular_file(extractor_config.input_path))
|
||||
{
|
||||
SimpleLogger().Write(logWARNING)
|
||||
<< "Input file " << extractor_config.input_path.string() << " not found!";
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (!boost::filesystem::is_regular_file(extractor_config.profile_path))
|
||||
{
|
||||
SimpleLogger().Write(logWARNING) << "Profile " << extractor_config.profile_path.string()
|
||||
<< " not found!";
|
||||
return 1;
|
||||
}
|
||||
|
||||
const unsigned recommended_num_threads = tbb::task_scheduler_init::default_num_threads();
|
||||
const auto number_of_threads = std::min(recommended_num_threads, extractor_config.requested_num_threads);
|
||||
tbb::task_scheduler_init init(number_of_threads);
|
||||
|
||||
SimpleLogger().Write() << "Input file: " << extractor_config.input_path.filename().string();
|
||||
SimpleLogger().Write() << "Profile: " << extractor_config.profile_path.filename().string();
|
||||
SimpleLogger().Write() << "Threads: " << number_of_threads;
|
||||
|
||||
// setup scripting environment
|
||||
ScriptingEnvironment scripting_environment(extractor_config.profile_path.string().c_str());
|
||||
|
||||
std::unordered_map<std::string, NodeID> string_map;
|
||||
string_map[""] = 0;
|
||||
|
||||
ExtractionContainers extraction_containers;
|
||||
auto extractor_callbacks =
|
||||
osrm::make_unique<ExtractorCallbacks>(extraction_containers, string_map);
|
||||
|
||||
osmium::io::File input_file(extractor_config.input_path.string());
|
||||
osmium::io::Reader reader(input_file);
|
||||
osmium::io::Header header = reader.header();
|
||||
|
||||
unsigned number_of_nodes = 0;
|
||||
unsigned number_of_ways = 0;
|
||||
unsigned number_of_relations = 0;
|
||||
unsigned number_of_others = 0;
|
||||
|
||||
SimpleLogger().Write() << "Parsing in progress..";
|
||||
TIMER_START(parsing);
|
||||
|
||||
std::string generator = header.get("generator");
|
||||
if (generator.empty())
|
||||
{
|
||||
generator = "unknown tool";
|
||||
}
|
||||
SimpleLogger().Write() << "input file generated by " << generator;
|
||||
|
||||
// write .timestamp data file
|
||||
std::string timestamp = header.get("osmosis_replication_timestamp");
|
||||
if (timestamp.empty())
|
||||
{
|
||||
timestamp = "n/a";
|
||||
}
|
||||
SimpleLogger().Write() << "timestamp: " << timestamp;
|
||||
|
||||
boost::filesystem::ofstream timestamp_out(extractor_config.timestamp_file_name);
|
||||
timestamp_out.write(timestamp.c_str(), timestamp.length());
|
||||
timestamp_out.close();
|
||||
|
||||
// initialize vectors holding parsed objects
|
||||
tbb::concurrent_vector<std::pair<std::size_t, ExtractionNode>> resulting_nodes;
|
||||
tbb::concurrent_vector<std::pair<std::size_t, ExtractionWay>> resulting_ways;
|
||||
tbb::concurrent_vector<mapbox::util::optional<InputRestrictionContainer>>
|
||||
resulting_restrictions;
|
||||
|
||||
// setup restriction parser
|
||||
RestrictionParser restriction_parser(scripting_environment.getLuaState());
|
||||
|
||||
while (osmium::memory::Buffer buffer = reader.read())
|
||||
{
|
||||
// create a vector of iterators into the buffer
|
||||
std::vector<osmium::memory::Buffer::iterator> osm_elements;
|
||||
osmium::memory::Buffer::iterator iter = std::begin(buffer);
|
||||
while (iter != std::end(buffer))
|
||||
{
|
||||
osm_elements.push_back(iter);
|
||||
iter = std::next(iter);
|
||||
}
|
||||
|
||||
// clear resulting vectors
|
||||
resulting_nodes.clear();
|
||||
resulting_ways.clear();
|
||||
resulting_restrictions.clear();
|
||||
|
||||
// parse OSM entities in parallel, store in resulting vectors
|
||||
tbb::parallel_for(tbb::blocked_range<std::size_t>(0, osm_elements.size()),
|
||||
[&](const tbb::blocked_range<std::size_t> &range)
|
||||
{
|
||||
for (auto x = range.begin(); x != range.end(); ++x)
|
||||
{
|
||||
auto entity = osm_elements[x];
|
||||
|
||||
ExtractionNode result_node;
|
||||
ExtractionWay result_way;
|
||||
|
||||
switch (entity->type())
|
||||
{
|
||||
case osmium::item_type::node:
|
||||
++number_of_nodes;
|
||||
luabind::call_function<void>(
|
||||
scripting_environment.getLuaState(),
|
||||
"node_function",
|
||||
boost::cref(static_cast<osmium::Node &>(*entity)),
|
||||
boost::ref(result_node));
|
||||
resulting_nodes.push_back(std::make_pair(x, result_node));
|
||||
break;
|
||||
case osmium::item_type::way:
|
||||
++number_of_ways;
|
||||
luabind::call_function<void>(
|
||||
scripting_environment.getLuaState(),
|
||||
"way_function",
|
||||
boost::cref(static_cast<osmium::Way &>(*entity)),
|
||||
boost::ref(result_way));
|
||||
resulting_ways.push_back(std::make_pair(x, result_way));
|
||||
break;
|
||||
case osmium::item_type::relation:
|
||||
++number_of_relations;
|
||||
resulting_restrictions.push_back(
|
||||
restriction_parser.TryParse(static_cast<osmium::Relation &>(*entity)));
|
||||
break;
|
||||
default:
|
||||
++number_of_others;
|
||||
break;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// put parsed objects through the extractor callbacks
|
||||
for (const auto &result : resulting_nodes)
|
||||
{
|
||||
extractor_callbacks->ProcessNode(
|
||||
static_cast<osmium::Node &>(*(osm_elements[result.first])), result.second);
|
||||
}
|
||||
for (const auto &result : resulting_ways)
|
||||
{
|
||||
extractor_callbacks->ProcessWay(
|
||||
static_cast<osmium::Way &>(*(osm_elements[result.first])), result.second);
|
||||
}
|
||||
for (const auto &result : resulting_restrictions)
|
||||
{
|
||||
extractor_callbacks->ProcessRestriction(result);
|
||||
}
|
||||
}
|
||||
TIMER_STOP(parsing);
|
||||
SimpleLogger().Write() << "Parsing finished after " << TIMER_SEC(parsing) << " seconds";
|
||||
SimpleLogger().Write() << "Raw input contains " << number_of_nodes << " nodes, "
|
||||
<< number_of_ways << " ways, and " << number_of_relations
|
||||
<< " relations, and " << number_of_others << " unknown entities";
|
||||
|
||||
extractor_callbacks.reset();
|
||||
|
||||
if (extraction_containers.all_edges_list.empty())
|
||||
{
|
||||
SimpleLogger().Write(logWARNING) << "The input data is empty, exiting.";
|
||||
return 1;
|
||||
}
|
||||
|
||||
extraction_containers.PrepareData(extractor_config.output_file_name,
|
||||
extractor_config.restriction_file_name);
|
||||
TIMER_STOP(extracting);
|
||||
SimpleLogger().Write() << "extraction finished after " << TIMER_SEC(extracting) << "s";
|
||||
SimpleLogger().Write() << "To prepare the data for routing, run: "
|
||||
<< "./osrm-prepare " << extractor_config.output_file_name
|
||||
<< std::endl;
|
||||
}
|
||||
catch (std::exception &e)
|
||||
{
|
||||
SimpleLogger().Write(logWARNING) << e.what();
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
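The parsing loop above follows a "collect indexed results in parallel, replay them serially" pattern. Below is a generic sketch of that pattern, assuming TBB as in the removed file; Element, ParsedResult, parse and consume are placeholders for the osmium entities, the ExtractionNode/ExtractionWay results, the Lua calls and the extractor callbacks.

#include <tbb/blocked_range.h>
#include <tbb/concurrent_vector.h>
#include <tbb/parallel_for.h>

#include <cstddef>
#include <utility>
#include <vector>

template <typename ParsedResult, typename Element, typename ParseFn, typename ConsumeFn>
void ParseThenReplay(const std::vector<Element> &elements, ParseFn parse, ConsumeFn consume)
{
    // each result keeps its buffer index so the consumer can look the
    // original entity up again, like resulting_nodes/resulting_ways above
    tbb::concurrent_vector<std::pair<std::size_t, ParsedResult>> results;

    tbb::parallel_for(tbb::blocked_range<std::size_t>(0, elements.size()),
                      [&](const tbb::blocked_range<std::size_t> &range)
                      {
                          for (auto x = range.begin(); x != range.end(); ++x)
                          {
                              results.push_back(std::make_pair(x, parse(elements[x])));
                          }
                      });

    // serial replay keeps the downstream, non-thread-safe containers simple
    for (const auto &result : results)
    {
        consume(elements[result.first], result.second);
    }
}

ParsedResult cannot be deduced and has to be given explicitly at the call site, e.g. ParseThenReplay<ExtractionNode>(nodes, parse_node, consume_node); this keeps the replay order independent of how the parallel chunks were scheduled.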
|