2016-01-02 11:13:44 -05:00
|
|
|
#include "contractor/contractor.hpp"
|
2016-03-09 07:18:09 -05:00
|
|
|
#include "contractor/crc32_processor.hpp"
|
2016-01-07 13:19:55 -05:00
|
|
|
#include "contractor/graph_contractor.hpp"
|
2015-11-09 15:14:39 -05:00
|
|
|
|
2016-01-29 20:52:20 -05:00
|
|
|
#include "extractor/compressed_edge_container.hpp"
|
2016-06-24 01:01:37 -04:00
|
|
|
#include "extractor/edge_based_graph_factory.hpp"
|
2016-05-16 17:11:01 -04:00
|
|
|
#include "extractor/node_based_edge.hpp"
|
2016-01-29 20:52:20 -05:00
|
|
|
|
2016-11-18 06:14:38 -05:00
|
|
|
#include "storage/io.hpp"
|
2016-05-16 17:11:01 -04:00
|
|
|
#include "util/exception.hpp"
|
2016-12-06 15:30:46 -05:00
|
|
|
#include "util/exception_utils.hpp"
|
2016-01-02 11:13:44 -05:00
|
|
|
#include "util/graph_loader.hpp"
|
|
|
|
#include "util/integer_range.hpp"
|
2016-05-16 17:11:01 -04:00
|
|
|
#include "util/io.hpp"
|
2016-12-06 15:30:46 -05:00
|
|
|
#include "util/log.hpp"
|
2016-05-16 17:11:01 -04:00
|
|
|
#include "util/static_graph.hpp"
|
|
|
|
#include "util/static_rtree.hpp"
|
2016-01-02 11:13:44 -05:00
|
|
|
#include "util/string_util.hpp"
|
|
|
|
#include "util/timing_util.hpp"
|
|
|
|
#include "util/typedefs.hpp"
|
2014-07-03 07:29:15 -04:00
|
|
|
|
2016-03-09 07:18:09 -05:00
|
|
|
#include <boost/assert.hpp>
|
2014-07-03 07:29:15 -04:00
|
|
|
#include <boost/filesystem/fstream.hpp>
|
2016-04-29 03:48:13 -04:00
|
|
|
#include <boost/functional/hash.hpp>
|
2016-05-17 15:09:54 -04:00
|
|
|
#include <boost/interprocess/file_mapping.hpp>
|
|
|
|
#include <boost/interprocess/mapped_region.hpp>
|
2016-05-18 10:59:52 -04:00
|
|
|
#include <boost/spirit/include/qi.hpp>
|
2014-07-03 07:29:15 -04:00
|
|
|
|
2016-05-16 18:02:07 -04:00
|
|
|
#include <tbb/blocked_range.h>
|
|
|
|
#include <tbb/concurrent_unordered_map.h>
|
2016-05-28 11:36:31 -04:00
|
|
|
#include <tbb/enumerable_thread_specific.h>
|
2016-05-16 18:02:07 -04:00
|
|
|
#include <tbb/parallel_for.h>
|
2016-05-17 15:09:54 -04:00
|
|
|
#include <tbb/parallel_for_each.h>
|
2016-05-16 17:20:52 -04:00
|
|
|
#include <tbb/parallel_invoke.h>
|
2014-07-03 07:29:15 -04:00
|
|
|
#include <tbb/parallel_sort.h>
|
2016-05-20 13:20:12 -04:00
|
|
|
#include <tbb/spin_mutex.h>
|
2014-07-03 07:29:15 -04:00
|
|
|
|
2016-05-20 13:20:12 -04:00
|
|
|
#include <algorithm>
#include <bitset>
#include <cmath>
#include <cstdint>
#include <fstream>
#include <iterator>
#include <memory>
#include <thread>
#include <tuple>
#include <vector>
|
2014-07-03 07:29:15 -04:00
|
|
|
|
2016-01-05 10:51:13 -05:00
|
|
|
namespace std
|
|
|
|
{
|
|
|
|
|
|
|
|
template <> struct hash<std::pair<OSMNodeID, OSMNodeID>>
|
|
|
|
{
|
2016-05-16 17:20:52 -04:00
|
|
|
std::size_t operator()(const std::pair<OSMNodeID, OSMNodeID> &k) const noexcept
|
2016-01-05 10:51:13 -05:00
|
|
|
{
|
2016-02-23 15:23:13 -05:00
|
|
|
return static_cast<uint64_t>(k.first) ^ (static_cast<uint64_t>(k.second) << 12);
|
2016-01-05 10:51:13 -05:00
|
|
|
}
|
|
|
|
};
|
2016-04-29 03:48:13 -04:00
|
|
|
|
|
|
|
template <> struct hash<std::tuple<OSMNodeID, OSMNodeID, OSMNodeID>>
|
|
|
|
{
|
2016-05-16 17:20:52 -04:00
|
|
|
std::size_t operator()(const std::tuple<OSMNodeID, OSMNodeID, OSMNodeID> &k) const noexcept
|
2016-04-29 03:48:13 -04:00
|
|
|
{
|
|
|
|
std::size_t seed = 0;
|
|
|
|
boost::hash_combine(seed, static_cast<uint64_t>(std::get<0>(k)));
|
|
|
|
boost::hash_combine(seed, static_cast<uint64_t>(std::get<1>(k)));
|
|
|
|
boost::hash_combine(seed, static_cast<uint64_t>(std::get<2>(k)));
|
|
|
|
return seed;
|
|
|
|
}
|
|
|
|
};
|
2016-01-05 10:51:13 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
namespace osrm
|
|
|
|
{
|
|
|
|
namespace contractor
|
|
|
|
{
|
|
|
|
|
2016-07-21 12:57:21 -04:00
|
|
|
// Returns duration in deci-seconds
|
|
|
|
inline EdgeWeight distanceAndSpeedToWeight(double distance_in_meters, double speed_in_kmh)
|
|
|
|
{
|
|
|
|
BOOST_ASSERT(speed_in_kmh > 0);
|
|
|
|
const double speed_in_ms = speed_in_kmh / 3.6;
|
|
|
|
const double duration = distance_in_meters / speed_in_ms;
|
|
|
|
return std::max<EdgeWeight>(1, static_cast<EdgeWeight>(std::round(duration * 10)));
|
|
|
|
}
|
|
|
|
|
2016-09-12 12:16:56 -04:00
|
|
|
// Returns updated edge weight
|
|
|
|
template <class IterType>
|
|
|
|
EdgeWeight getNewWeight(IterType speed_iter,
|
|
|
|
const double &segment_length,
|
|
|
|
const std::vector<std::string> &segment_speed_filenames,
|
|
|
|
const EdgeWeight old_weight,
|
|
|
|
const double log_edge_updates_factor)
|
|
|
|
{
|
|
|
|
const auto new_segment_weight =
|
|
|
|
(speed_iter->speed_source.speed > 0)
|
|
|
|
? distanceAndSpeedToWeight(segment_length, speed_iter->speed_source.speed)
|
|
|
|
: INVALID_EDGE_WEIGHT;
|
|
|
|
// the check here is enabled by the `--edge-weight-updates-over-factor` flag
|
|
|
|
// it logs a warning if the new weight exceeds a heuristic of what a reasonable weight update is
|
|
|
|
if (log_edge_updates_factor > 0 && old_weight != 0)
|
|
|
|
{
|
|
|
|
auto new_secs = new_segment_weight / 10.0;
|
|
|
|
auto old_secs = old_weight / 10.0;
|
|
|
|
auto approx_original_speed = (segment_length / old_secs) * 3.6;
|
|
|
|
if (old_weight >= (new_segment_weight * log_edge_updates_factor))
|
|
|
|
{
|
|
|
|
auto speed_file = segment_speed_filenames.at(speed_iter->speed_source.source - 1);
|
2016-12-06 15:30:46 -05:00
|
|
|
util::Log(logWARNING) << "[weight updates] Edge weight update from " << old_secs
|
|
|
|
<< "s to " << new_secs
|
|
|
|
<< "s New speed: " << speed_iter->speed_source.speed << " kph"
|
|
|
|
<< ". Old speed: " << approx_original_speed << " kph"
|
|
|
|
<< ". Segment length: " << segment_length << " m"
|
|
|
|
<< ". Segment: " << speed_iter->segment.from << ","
|
|
|
|
<< speed_iter->segment.to << " based on " << speed_file;
|
2016-09-12 12:16:56 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return new_segment_weight;
|
|
|
|
}
|
|
|
|
|
2016-01-07 13:19:55 -05:00
|
|
|
// Entry point of osrm-contract: loads the edge-expanded graph (optionally
// applying CSV speed/penalty updates), contracts it, and writes the
// contracted graph plus core-node/level metadata back to disk.
// Returns 0 on success; failures are reported via util::exception.
int Contractor::Run()
{
#ifdef WIN32
#pragma message("Memory consumption on Windows can be higher due to different bit packing")
#else
    // Guard against accidental growth of the hot edge structs: they are held
    // in memory in the millions, so any padding change matters.
    static_assert(sizeof(extractor::NodeBasedEdge) == 24,
                  "changing extractor::NodeBasedEdge type has influence on memory consumption!");
    static_assert(sizeof(extractor::EdgeBasedEdge) == 16,
                  "changing EdgeBasedEdge type has influence on memory consumption!");
#endif

    // core_factor is a ratio, so it must lie in [0, 1]
    if (config.core_factor > 1.0 || config.core_factor < 0)
    {
        throw util::exception("Core factor must be between 0.0 to 1.0 (inclusive)" + SOURCE_REF);
    }

    TIMER_START(preparing);

    util::Log() << "Loading edge-expanded graph representation";

    util::DeallocatingVector<extractor::EdgeBasedEdge> edge_based_edge_list;

    // Reads the .ebg file and, if speed/penalty CSVs were supplied, rewrites
    // edge weights (and the geometry/datasource files) before contraction.
    EdgeID max_edge_id = LoadEdgeExpandedGraph(config.edge_based_graph_path,
                                               edge_based_edge_list,
                                               config.edge_segment_lookup_path,
                                               config.edge_penalty_path,
                                               config.segment_speed_lookup_paths,
                                               config.turn_penalty_lookup_paths,
                                               config.node_based_graph_path,
                                               config.geometry_path,
                                               config.datasource_names_path,
                                               config.datasource_indexes_path,
                                               config.rtree_leaf_path,
                                               config.log_edge_updates_factor);

    // Contracting the edge-expanded graph

    TIMER_START(contraction);
    std::vector<bool> is_core_node;
    std::vector<float> node_levels;
    // Reuse node priorities from a previous run when requested; otherwise the
    // levels are produced by the contraction below and written out afterwards.
    if (config.use_cached_priority)
    {
        ReadNodeLevels(node_levels);
    }

    util::Log() << "Reading node weights.";
    std::vector<EdgeWeight> node_weights;
    std::string node_file_name = config.osrm_input_path.string() + ".enw";

    // Scoped so the file handle is closed as soon as the vector is loaded.
    {
        storage::io::FileReader node_file(node_file_name,
                                          storage::io::FileReader::VerifyFingerprint);
        node_file.DeserializeVector(node_weights);
    }
    util::Log() << "Done reading node weights.";

    util::DeallocatingVector<QueryEdge> contracted_edge_list;
    // node_weights is moved in — it is consumed by the contraction
    ContractGraph(max_edge_id,
                  edge_based_edge_list,
                  contracted_edge_list,
                  std::move(node_weights),
                  is_core_node,
                  node_levels);
    TIMER_STOP(contraction);

    util::Log() << "Contraction took " << TIMER_SEC(contraction) << " sec";

    std::size_t number_of_used_edges = WriteContractedGraph(max_edge_id, contracted_edge_list);
    WriteCoreNodeMarker(std::move(is_core_node));
    // Only persist freshly computed levels; cached ones are unchanged on disk.
    if (!config.use_cached_priority)
    {
        WriteNodeLevels(std::move(node_levels));
    }

    TIMER_STOP(preparing);

    // Throughput statistics for logging (node count is max_edge_id + 1).
    const auto nodes_per_second =
        static_cast<std::uint64_t>((max_edge_id + 1) / TIMER_SEC(contraction));
    const auto edges_per_second =
        static_cast<std::uint64_t>(number_of_used_edges / TIMER_SEC(contraction));

    util::Log() << "Preprocessing : " << TIMER_SEC(preparing) << " seconds";
    util::Log() << "Contraction: " << nodes_per_second << " nodes/sec and " << edges_per_second
                << " edges/sec";

    util::Log() << "finished preprocessing";

    return 0;
}
|
|
|
|
|
2016-05-16 17:11:01 -04:00
|
|
|
// Utilities for LoadEdgeExpandedGraph to restore my sanity
|
|
|
|
namespace
|
|
|
|
{
|
|
|
|
|
2016-05-20 13:20:12 -04:00
|
|
|
struct Segment final
|
|
|
|
{
|
|
|
|
OSMNodeID from, to;
|
2016-10-18 18:34:06 -04:00
|
|
|
bool operator==(const Segment &other) const
|
|
|
|
{
|
|
|
|
return std::tie(from, to) == std::tie(other.from, other.to);
|
|
|
|
}
|
2016-05-20 13:20:12 -04:00
|
|
|
};
|
|
|
|
|
|
|
|
// Speed parsed from one line of a segment speed CSV file, plus which file it
// came from. `source` is one-based; 0 is reserved for profile-assigned weights
// (see parse_segment_lookup_from_csv_files).
struct SpeedSource final
{
    unsigned speed;      // km/h; 0 invalidates the segment (see getNewWeight)
    std::uint8_t source; // one-based index into the list of speed files
};
|
|
|
|
|
|
|
|
struct SegmentSpeedSource final
|
|
|
|
{
|
|
|
|
Segment segment;
|
|
|
|
SpeedSource speed_source;
|
2016-10-18 18:34:06 -04:00
|
|
|
// < operator is overloaded here to return a > comparison to be used by the
|
|
|
|
// std::lower_bound() call in the find() function
|
|
|
|
bool operator<(const SegmentSpeedSource &other) const
|
|
|
|
{
|
|
|
|
return std::tie(segment.from, segment.to) > std::tie(other.segment.from, other.segment.to);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
struct Turn final
|
|
|
|
{
|
|
|
|
OSMNodeID from, via, to;
|
|
|
|
bool operator==(const Turn &other) const
|
|
|
|
{
|
|
|
|
return std::tie(from, via, to) == std::tie(other.from, other.via, other.to);
|
|
|
|
}
|
2016-05-20 13:20:12 -04:00
|
|
|
};
|
2016-05-16 17:11:01 -04:00
|
|
|
|
2016-10-18 18:34:06 -04:00
|
|
|
// Penalty parsed from one line of a turn penalty CSV file, plus which file it
// came from. `source` is one-based; 0 is reserved for profile-assigned
// penalties (see parse_turn_penalty_lookup_from_csv_files).
struct PenaltySource final
{
    double penalty;      // turn penalty value as parsed from the CSV file
    std::uint8_t source; // one-based index into the list of penalty files
};
|
|
|
|
struct TurnPenaltySource final
|
|
|
|
{
|
|
|
|
Turn segment;
|
|
|
|
PenaltySource penalty_source;
|
|
|
|
// < operator is overloaded here to return a > comparison to be used by the
|
|
|
|
// std::lower_bound() call in the find() function
|
|
|
|
bool operator<(const TurnPenaltySource &other) const
|
|
|
|
{
|
|
|
|
return std::tie(segment.from, segment.via, segment.to) >
|
|
|
|
std::tie(other.segment.from, other.segment.via, other.segment.to);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
using TurnPenaltySourceFlatMap = std::vector<TurnPenaltySource>;
|
2016-05-20 13:20:12 -04:00
|
|
|
using SegmentSpeedSourceFlatMap = std::vector<SegmentSpeedSource>;
|
|
|
|
|
2016-10-21 17:22:12 -04:00
|
|
|
// Find is a binary search over a flattened key,val Segment storage.
// It takes the flat map and a Segment/PenaltySource object that has an
// overloaded `==` operator, to make the std::lower_bound call work generically.
// Returns end(map) when the key is absent.
template <typename FlatMap, typename SegmentKey>
auto find(const FlatMap &map, const SegmentKey &key)
{
    const auto not_found = end(map);
    const auto candidate = std::lower_bound(begin(map), not_found, key);

    // lower_bound yields only a position; verify the key actually matches
    if (candidate == not_found || !(candidate->segment == key.segment))
        return not_found;

    return candidate;
}
|
|
|
|
|
2016-05-16 17:11:01 -04:00
|
|
|
// Functions for parsing files and creating lookup tables
|
|
|
|
|
2016-05-20 13:20:12 -04:00
|
|
|
// Parses all segment speed CSV files ("from,to,speed[,...]" per line) in
// parallel and merges the results into a single flat map sorted descending on
// (from, to), deduplicated so that later files take precedence over earlier
// ones. Throws util::exception on malformed input.
SegmentSpeedSourceFlatMap
parse_segment_lookup_from_csv_files(const std::vector<std::string> &segment_speed_filenames)
{
    // TODO: shares code with turn penalty lookup parse function

    using Mutex = tbb::spin_mutex;

    // Loaded and parsed in parallel, at the end we combine results in a flattened map-ish view
    SegmentSpeedSourceFlatMap flatten;
    Mutex flatten_mutex;

    // Parses one file into a thread-local vector, then appends it to the
    // shared `flatten` under the lock.
    const auto parse_segment_speed_file = [&](const std::size_t idx) {
        const auto file_id = idx + 1; // starts at one, zero means we assigned the weight
        const auto filename = segment_speed_filenames[idx];

        storage::io::FileReader segment_speed_file_reader(
            filename, storage::io::FileReader::HasNoFingerprint);

        SegmentSpeedSourceFlatMap local;

        // Parse targets; reused across lines.
        std::uint64_t from_node_id{};
        std::uint64_t to_node_id{};
        unsigned speed{};

        // 1-based after the first increment; used in error messages only
        std::size_t line_number = 0;

        std::for_each(
            segment_speed_file_reader.GetLineIteratorBegin(),
            segment_speed_file_reader.GetLineIteratorEnd(),
            [&](const std::string &line) {
                ++line_number;

                using namespace boost::spirit::qi;

                auto it = begin(line);
                const auto last = end(line);

                // The ulong_long -> uint64_t will likely break on 32bit platforms
                const auto ok =
                    parse(it,
                          last,                                                                  //
                          (ulong_long >> ',' >> ulong_long >> ',' >> uint_ >> *(',' >> *char_)), //
                          from_node_id,
                          to_node_id,
                          speed); //

                // Reject the line unless the grammar matched AND consumed it fully.
                if (!ok || it != last)
                {
                    const std::string message{"Segment speed file " + filename +
                                              " malformed on line " + std::to_string(line_number)};
                    throw util::exception(message + SOURCE_REF);
                }

                SegmentSpeedSource val{{OSMNodeID{from_node_id}, OSMNodeID{to_node_id}},
                                       {speed, static_cast<std::uint8_t>(file_id)}};

                local.push_back(std::move(val));
            });

        util::Log() << "Loaded speed file " << filename << " with " << local.size() << " speeds";

        {
            Mutex::scoped_lock _{flatten_mutex};

            flatten.insert(end(flatten),
                           std::make_move_iterator(begin(local)),
                           std::make_move_iterator(end(local)));
        }
    };

    // TBB wraps worker exceptions; rethrow as our exception type with context.
    try
    {
        tbb::parallel_for(std::size_t{0}, segment_speed_filenames.size(), parse_segment_speed_file);
    }
    catch (const tbb::captured_exception &e)
    {
        throw util::exception(e.what() + SOURCE_REF);
    }

    // With flattened map-ish view of all the files, sort and unique them on from,to,source
    // The greater '>' is used here since we want to give files later on higher precedence
    const auto sort_by = [](const SegmentSpeedSource &lhs, const SegmentSpeedSource &rhs) {
        return std::tie(lhs.segment.from, lhs.segment.to, lhs.speed_source.source) >
               std::tie(rhs.segment.from, rhs.segment.to, rhs.speed_source.source);
    };

    // stable_sort keeps input order among equal keys
    std::stable_sort(begin(flatten), end(flatten), sort_by);

    // Unique only on from,to to take the source precedence into account and remove duplicates
    const auto unique_by = [](const SegmentSpeedSource &lhs, const SegmentSpeedSource &rhs) {
        return std::tie(lhs.segment.from, lhs.segment.to) ==
               std::tie(rhs.segment.from, rhs.segment.to);
    };

    const auto it = std::unique(begin(flatten), end(flatten), unique_by);

    flatten.erase(it, end(flatten));

    util::Log() << "In total loaded " << segment_speed_filenames.size()
                << " speed file(s) with a total of " << flatten.size() << " unique values";

    return flatten;
}
|
|
|
|
|
2016-10-18 18:34:06 -04:00
|
|
|
// Parses all turn penalty CSV files ("from,via,to,penalty[,...]" per line) in
// parallel and merges the results into a single flat map sorted descending on
// (from, via, to), deduplicated so that later files take precedence over
// earlier ones. Throws util::exception on malformed input.
TurnPenaltySourceFlatMap
parse_turn_penalty_lookup_from_csv_files(const std::vector<std::string> &turn_penalty_filenames)
{
    using Mutex = tbb::spin_mutex;

    // TODO: shares code with turn penalty lookup parse function
    TurnPenaltySourceFlatMap map;
    Mutex flatten_mutex;

    // Parses one file into a thread-local vector, then appends it to the
    // shared `map` under the lock.
    const auto parse_turn_penalty_file = [&](const std::size_t idx) {
        const auto file_id = idx + 1; // starts at one, zero means we assigned the weight
        const auto filename = turn_penalty_filenames[idx];

        storage::io::FileReader turn_penalty_file_reader(filename,
                                                         storage::io::FileReader::HasNoFingerprint);
        TurnPenaltySourceFlatMap local;

        // Parse targets; reused across lines.
        std::uint64_t from_node_id{};
        std::uint64_t via_node_id{};
        std::uint64_t to_node_id{};
        double penalty{};

        // 1-based after the first increment; used in error messages only
        std::size_t line_number = 0;

        std::for_each(
            turn_penalty_file_reader.GetLineIteratorBegin(),
            turn_penalty_file_reader.GetLineIteratorEnd(),
            [&](const std::string &line) {
                ++line_number;

                using namespace boost::spirit::qi;

                auto it = begin(line);
                const auto last = end(line);

                // The ulong_long -> uint64_t will likely break on 32bit platforms
                const auto ok = parse(it,
                                      last, //
                                      (ulong_long >> ',' >> ulong_long >> ',' >> ulong_long >>
                                       ',' >> double_ >> *(',' >> *char_)), //
                                      from_node_id,
                                      via_node_id,
                                      to_node_id,
                                      penalty); //

                // Reject the line unless the grammar matched AND consumed it fully.
                if (!ok || it != last)
                {
                    const std::string message{"Turn penalty file " + filename +
                                              " malformed on line " + std::to_string(line_number)};
                    throw util::exception(message + SOURCE_REF);
                }

                TurnPenaltySource val{
                    {OSMNodeID{from_node_id}, OSMNodeID{via_node_id}, OSMNodeID{to_node_id}},
                    {penalty, static_cast<std::uint8_t>(file_id)}};
                local.push_back(std::move(val));
            });

        util::Log() << "Loaded penalty file " << filename << " with " << local.size()
                    << " turn penalties";

        {
            Mutex::scoped_lock _{flatten_mutex};

            map.insert(end(map),
                       std::make_move_iterator(begin(local)),
                       std::make_move_iterator(end(local)));
        }
    };

    // TBB wraps worker exceptions; rethrow as our exception type with context.
    try
    {
        tbb::parallel_for(std::size_t{0}, turn_penalty_filenames.size(), parse_turn_penalty_file);
    }
    catch (const tbb::captured_exception &e)
    {
        throw util::exception(e.what() + SOURCE_REF);
    }

    // With flattened map-ish view of all the files, sort and unique them on from,to,source
    // The greater '>' is used here since we want to give files later on higher precedence
    const auto sort_by = [](const TurnPenaltySource &lhs, const TurnPenaltySource &rhs) {
        return std::tie(
                   lhs.segment.from, lhs.segment.via, lhs.segment.to, lhs.penalty_source.source) >
               std::tie(
                   rhs.segment.from, rhs.segment.via, rhs.segment.to, rhs.penalty_source.source);
    };

    // stable_sort keeps input order among equal keys
    std::stable_sort(begin(map), end(map), sort_by);

    // Unique only on from,to to take the source precedence into account and remove duplicates
    const auto unique_by = [](const TurnPenaltySource &lhs, const TurnPenaltySource &rhs) {
        return std::tie(lhs.segment.from, lhs.segment.via, lhs.segment.to) ==
               std::tie(rhs.segment.from, rhs.segment.via, rhs.segment.to);
    };

    const auto it = std::unique(begin(map), end(map), unique_by);

    map.erase(it, end(map));

    util::Log() << "In total loaded " << turn_penalty_filenames.size()
                << " turn penalty file(s) with a total of " << map.size() << " unique values";

    return map;
}
|
|
|
|
} // anon ns
|
|
|
|
|
2016-06-11 11:23:29 -04:00
|
|
|
EdgeID Contractor::LoadEdgeExpandedGraph(
|
2016-01-07 19:31:57 -05:00
|
|
|
std::string const &edge_based_graph_filename,
|
|
|
|
util::DeallocatingVector<extractor::EdgeBasedEdge> &edge_based_edge_list,
|
|
|
|
const std::string &edge_segment_lookup_filename,
|
|
|
|
const std::string &edge_penalty_filename,
|
2016-03-15 02:03:19 -04:00
|
|
|
const std::vector<std::string> &segment_speed_filenames,
|
2016-04-29 03:48:13 -04:00
|
|
|
const std::vector<std::string> &turn_penalty_filenames,
|
2016-01-29 20:52:20 -05:00
|
|
|
const std::string &nodes_filename,
|
|
|
|
const std::string &geometry_filename,
|
2016-03-15 02:03:19 -04:00
|
|
|
const std::string &datasource_names_filename,
|
|
|
|
const std::string &datasource_indexes_filename,
|
2016-09-12 12:16:56 -04:00
|
|
|
const std::string &rtree_leaf_filename,
|
|
|
|
const double log_edge_updates_factor)
|
2015-07-04 11:37:24 -04:00
|
|
|
{
|
2016-05-16 17:11:01 -04:00
|
|
|
if (segment_speed_filenames.size() > 255 || turn_penalty_filenames.size() > 255)
|
2016-12-06 15:30:46 -05:00
|
|
|
throw util::exception("Limit of 255 segment speed and turn penalty files each reached" +
|
|
|
|
SOURCE_REF);
|
2016-05-16 17:11:01 -04:00
|
|
|
|
2016-12-06 15:30:46 -05:00
|
|
|
util::Log() << "Opening " << edge_based_graph_filename;
|
2016-06-24 01:01:37 -04:00
|
|
|
|
|
|
|
auto mmap_file = [](const std::string &filename) {
|
|
|
|
using boost::interprocess::file_mapping;
|
|
|
|
using boost::interprocess::mapped_region;
|
|
|
|
using boost::interprocess::read_only;
|
|
|
|
|
2016-12-07 14:42:13 -05:00
|
|
|
try
|
|
|
|
{
|
|
|
|
const file_mapping mapping{filename.c_str(), read_only};
|
|
|
|
mapped_region region{mapping, read_only};
|
|
|
|
region.advise(mapped_region::advice_sequential);
|
|
|
|
return region;
|
|
|
|
}
|
|
|
|
catch (const std::exception &e)
|
|
|
|
{
|
|
|
|
util::Log(logERROR) << "Error while trying to mmap " + filename + ": " + e.what();
|
|
|
|
throw;
|
|
|
|
}
|
2016-06-24 01:01:37 -04:00
|
|
|
};
|
|
|
|
|
|
|
|
const auto edge_based_graph_region = mmap_file(edge_based_graph_filename);
|
2015-10-14 18:08:22 -04:00
|
|
|
|
2016-03-15 02:03:19 -04:00
|
|
|
const bool update_edge_weights = !segment_speed_filenames.empty();
|
2016-04-29 03:48:13 -04:00
|
|
|
const bool update_turn_penalties = !turn_penalty_filenames.empty();
|
2015-10-14 18:08:22 -04:00
|
|
|
|
2016-06-24 01:01:37 -04:00
|
|
|
const auto edge_penalty_region = [&] {
|
|
|
|
if (update_edge_weights || update_turn_penalties)
|
|
|
|
{
|
|
|
|
return mmap_file(edge_penalty_filename);
|
|
|
|
}
|
|
|
|
return boost::interprocess::mapped_region();
|
|
|
|
}();
|
2015-10-14 18:08:22 -04:00
|
|
|
|
2016-06-24 01:01:37 -04:00
|
|
|
const auto edge_segment_region = [&] {
|
|
|
|
if (update_edge_weights || update_turn_penalties)
|
2015-10-14 18:08:22 -04:00
|
|
|
{
|
2016-06-24 01:01:37 -04:00
|
|
|
return mmap_file(edge_segment_lookup_filename);
|
2015-10-14 18:08:22 -04:00
|
|
|
}
|
2016-06-24 01:01:37 -04:00
|
|
|
return boost::interprocess::mapped_region();
|
|
|
|
}();
|
|
|
|
|
2016-07-26 09:00:58 -04:00
|
|
|
// Set the struct packing to 1 byte word sizes. This prevents any padding. We only use
|
|
|
|
// this struct once, so any alignment penalty is trivial. If this is *not* done, then
|
|
|
|
// the struct will be padded out by an extra 4 bytes, and sizeof() will mean we read
|
|
|
|
// too much data from the original file.
|
|
|
|
#pragma pack(push, r1, 1)
|
|
|
|
struct EdgeBasedGraphHeader
|
|
|
|
{
|
2016-06-24 01:01:37 -04:00
|
|
|
util::FingerPrint fingerprint;
|
|
|
|
std::uint64_t number_of_edges;
|
|
|
|
EdgeID max_edge_id;
|
|
|
|
};
|
2016-07-26 09:00:58 -04:00
|
|
|
#pragma pack(pop, r1)
|
2015-10-01 15:47:29 -04:00
|
|
|
|
2016-07-26 09:00:58 -04:00
|
|
|
const EdgeBasedGraphHeader graph_header =
|
|
|
|
*(reinterpret_cast<const EdgeBasedGraphHeader *>(edge_based_graph_region.get_address()));
|
2015-10-01 15:47:29 -04:00
|
|
|
|
2016-06-24 01:01:37 -04:00
|
|
|
const util::FingerPrint fingerprint_valid = util::FingerPrint::GetValid();
|
|
|
|
graph_header.fingerprint.TestContractor(fingerprint_valid);
|
2015-10-01 15:47:29 -04:00
|
|
|
|
2016-06-24 01:01:37 -04:00
|
|
|
edge_based_edge_list.resize(graph_header.number_of_edges);
|
2016-12-06 15:30:46 -05:00
|
|
|
util::Log() << "Reading " << graph_header.number_of_edges << " edges from the edge based graph";
|
2015-10-01 15:47:29 -04:00
|
|
|
|
2016-05-20 13:20:12 -04:00
|
|
|
SegmentSpeedSourceFlatMap segment_speed_lookup;
|
2016-10-18 18:34:06 -04:00
|
|
|
TurnPenaltySourceFlatMap turn_penalty_lookup;
|
2016-05-16 17:20:52 -04:00
|
|
|
|
|
|
|
const auto parse_segment_speeds = [&] {
|
2016-04-29 03:48:13 -04:00
|
|
|
if (update_edge_weights)
|
2016-05-16 17:20:52 -04:00
|
|
|
segment_speed_lookup = parse_segment_lookup_from_csv_files(segment_speed_filenames);
|
|
|
|
};
|
2016-03-15 02:03:19 -04:00
|
|
|
|
2016-05-16 17:20:52 -04:00
|
|
|
const auto parse_turn_penalties = [&] {
|
2016-04-29 03:48:13 -04:00
|
|
|
if (update_turn_penalties)
|
2016-05-16 17:20:52 -04:00
|
|
|
turn_penalty_lookup = parse_turn_penalty_lookup_from_csv_files(turn_penalty_filenames);
|
|
|
|
};
|
|
|
|
|
2016-05-16 17:11:01 -04:00
|
|
|
// If we update the edge weights, this file will hold the datasource information for each
|
2016-05-16 18:45:54 -04:00
|
|
|
// segment; the other files will also be conditionally filled concurrently if we make an update
|
2016-05-16 17:11:01 -04:00
|
|
|
std::vector<uint8_t> m_geometry_datasource;
|
|
|
|
|
2016-05-16 18:45:54 -04:00
|
|
|
std::vector<extractor::QueryNode> internal_to_external_node_map;
|
|
|
|
std::vector<unsigned> m_geometry_indices;
|
2016-10-07 00:06:33 -04:00
|
|
|
std::vector<NodeID> m_geometry_node_list;
|
|
|
|
std::vector<EdgeWeight> m_geometry_fwd_weight_list;
|
|
|
|
std::vector<EdgeWeight> m_geometry_rev_weight_list;
|
2016-05-16 18:45:54 -04:00
|
|
|
|
|
|
|
const auto maybe_load_internal_to_external_node_map = [&] {
|
|
|
|
if (!(update_edge_weights || update_turn_penalties))
|
|
|
|
return;
|
|
|
|
|
2016-11-30 22:08:01 -05:00
|
|
|
storage::io::FileReader nodes_file(nodes_filename,
|
|
|
|
storage::io::FileReader::HasNoFingerprint);
|
2016-05-16 18:45:54 -04:00
|
|
|
|
2016-11-30 22:08:01 -05:00
|
|
|
nodes_file.DeserializeVector(internal_to_external_node_map);
|
2016-05-16 18:45:54 -04:00
|
|
|
|
|
|
|
};
|
|
|
|
|
|
|
|
const auto maybe_load_geometries = [&] {
|
|
|
|
if (!(update_edge_weights || update_turn_penalties))
|
|
|
|
return;
|
|
|
|
|
2016-11-30 22:08:01 -05:00
|
|
|
storage::io::FileReader geometry_file(geometry_filename,
|
|
|
|
storage::io::FileReader::HasNoFingerprint);
|
|
|
|
const auto number_of_indices = geometry_file.ReadElementCount32();
|
2016-05-16 18:45:54 -04:00
|
|
|
m_geometry_indices.resize(number_of_indices);
|
2016-11-30 22:08:01 -05:00
|
|
|
geometry_file.ReadInto(m_geometry_indices.data(), number_of_indices);
|
2016-05-16 18:45:54 -04:00
|
|
|
|
2016-11-30 22:08:01 -05:00
|
|
|
const auto number_of_compressed_geometries = geometry_file.ReadElementCount32();
|
2016-05-16 18:45:54 -04:00
|
|
|
|
|
|
|
BOOST_ASSERT(m_geometry_indices.back() == number_of_compressed_geometries);
|
2016-10-07 00:06:33 -04:00
|
|
|
m_geometry_node_list.resize(number_of_compressed_geometries);
|
|
|
|
m_geometry_fwd_weight_list.resize(number_of_compressed_geometries);
|
|
|
|
m_geometry_rev_weight_list.resize(number_of_compressed_geometries);
|
2016-05-16 18:45:54 -04:00
|
|
|
|
|
|
|
if (number_of_compressed_geometries > 0)
|
|
|
|
{
|
2016-11-30 22:08:01 -05:00
|
|
|
geometry_file.ReadInto(m_geometry_node_list.data(), number_of_compressed_geometries);
|
|
|
|
geometry_file.ReadInto(m_geometry_fwd_weight_list.data(),
|
|
|
|
number_of_compressed_geometries);
|
|
|
|
geometry_file.ReadInto(m_geometry_rev_weight_list.data(),
|
|
|
|
number_of_compressed_geometries);
|
2016-05-16 18:45:54 -04:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// Folds all our actions into independently concurrently executing lambdas
|
2016-05-27 15:05:04 -04:00
|
|
|
tbb::parallel_invoke(parse_segment_speeds,
|
|
|
|
parse_turn_penalties, //
|
|
|
|
maybe_load_internal_to_external_node_map,
|
|
|
|
maybe_load_geometries);
|
2016-05-16 18:45:54 -04:00
|
|
|
|
2016-05-16 17:11:01 -04:00
|
|
|
if (update_edge_weights || update_turn_penalties)
|
|
|
|
{
|
2016-01-29 20:52:20 -05:00
|
|
|
// Here, we have to update the compressed geometry weights
|
|
|
|
// First, we need the external-to-internal node lookup table
|
2016-05-16 18:37:46 -04:00
|
|
|
|
2016-03-15 02:03:19 -04:00
|
|
|
// This is a list of the "data source id" for every segment in the compressed
|
|
|
|
// geometry container. We assume that everything so far has come from the
|
|
|
|
// profile (data source 0). Here, we replace the 0's with the index of the
|
|
|
|
// CSV file that supplied the value that gets used for that segment, then
|
|
|
|
// we write out this list so that it can be returned by the debugging
|
|
|
|
// vector tiles later on.
|
2016-10-07 00:06:33 -04:00
|
|
|
m_geometry_datasource.resize(m_geometry_fwd_weight_list.size(), 0);
|
2016-03-15 02:03:19 -04:00
|
|
|
|
2016-01-29 20:52:20 -05:00
|
|
|
// Now, we iterate over all the segments stored in the StaticRTree, updating
|
|
|
|
// the packed geometry weights in the `.geometries` file (note: we do not
|
|
|
|
// update the RTree itself, we just use the leaf nodes to iterate over all segments)
|
2016-05-17 13:50:29 -04:00
|
|
|
using LeafNode = util::StaticRTree<extractor::EdgeBasedNode>::LeafNode;
|
2016-01-29 20:52:20 -05:00
|
|
|
|
2016-05-17 15:09:54 -04:00
|
|
|
using boost::interprocess::mapped_region;
|
2016-01-29 20:52:20 -05:00
|
|
|
|
2016-06-24 01:01:37 -04:00
|
|
|
auto region = mmap_file(rtree_leaf_filename.c_str());
|
2016-05-17 15:09:54 -04:00
|
|
|
region.advise(mapped_region::advice_willneed);
|
2016-01-29 20:52:20 -05:00
|
|
|
|
2016-05-17 15:09:54 -04:00
|
|
|
const auto bytes = region.get_size();
|
|
|
|
const auto first = static_cast<const LeafNode *>(region.get_address());
|
|
|
|
const auto last = first + (bytes / sizeof(LeafNode));
|
|
|
|
|
2016-05-20 16:00:52 -04:00
|
|
|
// vector to count used speeds for logging
|
|
|
|
// size offset by one since index 0 is used for speeds not from external file
|
2016-05-28 11:36:31 -04:00
|
|
|
using counters_type = std::vector<std::size_t>;
|
|
|
|
std::size_t num_counters = segment_speed_filenames.size() + 1;
|
|
|
|
tbb::enumerable_thread_specific<counters_type> segment_speeds_counters(
|
|
|
|
counters_type(num_counters, 0));
|
2016-05-26 11:47:35 -04:00
|
|
|
const constexpr auto LUA_SOURCE = 0;
|
2016-05-20 16:00:52 -04:00
|
|
|
|
2016-05-17 15:09:54 -04:00
|
|
|
tbb::parallel_for_each(first, last, [&](const LeafNode ¤t_node) {
|
2016-05-28 11:36:31 -04:00
|
|
|
auto &counters = segment_speeds_counters.local();
|
2016-05-17 13:50:29 -04:00
|
|
|
for (size_t i = 0; i < current_node.object_count; i++)
|
2016-01-29 20:52:20 -05:00
|
|
|
{
|
2016-05-17 15:09:54 -04:00
|
|
|
const auto &leaf_object = current_node.objects[i];
|
2016-05-17 13:50:29 -04:00
|
|
|
extractor::QueryNode *u;
|
|
|
|
extractor::QueryNode *v;
|
2016-01-29 20:52:20 -05:00
|
|
|
|
2016-07-22 12:23:54 -04:00
|
|
|
const unsigned forward_begin =
|
|
|
|
m_geometry_indices.at(leaf_object.packed_geometry_id);
|
2016-10-07 00:06:33 -04:00
|
|
|
const auto current_fwd_weight =
|
|
|
|
m_geometry_fwd_weight_list[forward_begin + leaf_object.fwd_segment_position];
|
2016-07-22 12:23:54 -04:00
|
|
|
|
2016-10-07 00:06:33 -04:00
|
|
|
u = &(internal_to_external_node_map
|
2016-10-19 20:11:36 -04:00
|
|
|
[m_geometry_node_list[forward_begin + leaf_object.fwd_segment_position]]);
|
2016-07-22 12:23:54 -04:00
|
|
|
v = &(internal_to_external_node_map
|
2016-10-19 20:11:36 -04:00
|
|
|
[m_geometry_node_list[forward_begin + leaf_object.fwd_segment_position +
|
|
|
|
1]]);
|
2016-07-22 12:23:54 -04:00
|
|
|
|
|
|
|
const double segment_length = util::coordinate_calculation::greatCircleDistance(
|
|
|
|
util::Coordinate{u->lon, u->lat}, util::Coordinate{v->lon, v->lat});
|
|
|
|
|
2016-10-18 18:34:06 -04:00
|
|
|
auto forward_speed_iter = find(
|
|
|
|
segment_speed_lookup, SegmentSpeedSource{{u->node_id, v->node_id}, {0, 0}});
|
2016-07-22 12:23:54 -04:00
|
|
|
if (forward_speed_iter != segment_speed_lookup.end())
|
2016-01-29 20:52:20 -05:00
|
|
|
{
|
2016-07-22 12:23:54 -04:00
|
|
|
const auto new_segment_weight = getNewWeight(forward_speed_iter,
|
|
|
|
segment_length,
|
|
|
|
segment_speed_filenames,
|
2016-10-07 00:06:33 -04:00
|
|
|
current_fwd_weight,
|
2016-07-22 12:23:54 -04:00
|
|
|
log_edge_updates_factor);
|
|
|
|
|
2016-10-19 20:11:36 -04:00
|
|
|
m_geometry_fwd_weight_list[forward_begin + 1 +
|
|
|
|
leaf_object.fwd_segment_position] =
|
2016-10-07 00:06:33 -04:00
|
|
|
new_segment_weight;
|
2016-07-22 12:23:54 -04:00
|
|
|
m_geometry_datasource[forward_begin + 1 + leaf_object.fwd_segment_position] =
|
|
|
|
forward_speed_iter->speed_source.source;
|
|
|
|
|
|
|
|
// count statistics for logging
|
|
|
|
counters[forward_speed_iter->speed_source.source] += 1;
|
2016-05-17 13:50:29 -04:00
|
|
|
}
|
2016-07-22 12:23:54 -04:00
|
|
|
else
|
2016-05-17 13:50:29 -04:00
|
|
|
{
|
2016-07-22 12:23:54 -04:00
|
|
|
// count statistics for logging
|
|
|
|
counters[LUA_SOURCE] += 1;
|
|
|
|
}
|
2016-05-17 13:50:29 -04:00
|
|
|
|
2016-10-07 00:06:33 -04:00
|
|
|
const auto current_rev_weight =
|
|
|
|
m_geometry_rev_weight_list[forward_begin + leaf_object.fwd_segment_position];
|
|
|
|
|
2016-10-18 18:34:06 -04:00
|
|
|
const auto reverse_speed_iter = find(
|
|
|
|
segment_speed_lookup, SegmentSpeedSource{{v->node_id, u->node_id}, {0, 0}});
|
|
|
|
|
2016-07-22 12:23:54 -04:00
|
|
|
if (reverse_speed_iter != segment_speed_lookup.end())
|
|
|
|
{
|
|
|
|
const auto new_segment_weight = getNewWeight(reverse_speed_iter,
|
|
|
|
segment_length,
|
|
|
|
segment_speed_filenames,
|
2016-10-07 00:06:33 -04:00
|
|
|
current_rev_weight,
|
2016-07-22 12:23:54 -04:00
|
|
|
log_edge_updates_factor);
|
2016-10-07 00:06:33 -04:00
|
|
|
m_geometry_rev_weight_list[forward_begin + leaf_object.fwd_segment_position] =
|
|
|
|
new_segment_weight;
|
2016-07-22 12:23:54 -04:00
|
|
|
m_geometry_datasource[forward_begin + leaf_object.fwd_segment_position] =
|
|
|
|
reverse_speed_iter->speed_source.source;
|
|
|
|
|
|
|
|
// count statistics for logging
|
|
|
|
counters[reverse_speed_iter->speed_source.source] += 1;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
counters[LUA_SOURCE] += 1;
|
2016-01-29 20:52:20 -05:00
|
|
|
}
|
|
|
|
}
|
2016-05-17 15:09:54 -04:00
|
|
|
}); // parallel_for_each
|
2016-05-20 16:00:52 -04:00
|
|
|
|
2016-05-28 11:36:31 -04:00
|
|
|
counters_type merged_counters(num_counters, 0);
|
|
|
|
for (const auto &counters : segment_speeds_counters)
|
|
|
|
{
|
|
|
|
for (std::size_t i = 0; i < counters.size(); i++)
|
|
|
|
{
|
|
|
|
merged_counters[i] += counters[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (std::size_t i = 0; i < merged_counters.size(); i++)
|
2016-05-20 16:00:52 -04:00
|
|
|
{
|
|
|
|
if (i == LUA_SOURCE)
|
|
|
|
{
|
2016-12-06 15:30:46 -05:00
|
|
|
util::Log() << "Used " << merged_counters[LUA_SOURCE]
|
|
|
|
<< " speeds from LUA profile or input map";
|
2016-05-20 16:00:52 -04:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
// segments_speeds_counters has 0 as LUA, segment_speed_filenames not, thus we need
|
|
|
|
// to susbstract 1 to avoid off-by-one error
|
2016-12-06 15:30:46 -05:00
|
|
|
util::Log() << "Used " << merged_counters[i] << " speeds from "
|
|
|
|
<< segment_speed_filenames[i - 1];
|
2016-05-20 16:00:52 -04:00
|
|
|
}
|
|
|
|
}
|
2016-05-17 13:50:29 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
    // Writes the (possibly weight-updated) compressed geometry data back to
    // the `.geometries` file. No-op unless an update pass actually ran.
    const auto maybe_save_geometries = [&] {
        if (!(update_edge_weights || update_turn_penalties))
            return;

        // Now save out the updated compressed geometries
        std::ofstream geometry_stream(geometry_filename, std::ios::binary);
        if (!geometry_stream)
        {
            const std::string message{"Failed to open " + geometry_filename + " for writing"};
            throw util::exception(message + SOURCE_REF);
        }
        const unsigned number_of_indices = m_geometry_indices.size();
        const unsigned number_of_compressed_geometries = m_geometry_node_list.size();
        // On-disk layout mirrors the loader above: 32-bit index count, the
        // index array, 32-bit geometry count, then the node / forward-weight /
        // reverse-weight arrays as raw bytes.
        geometry_stream.write(reinterpret_cast<const char *>(&number_of_indices), sizeof(unsigned));
        geometry_stream.write(reinterpret_cast<char *>(&(m_geometry_indices[0])),
                              number_of_indices * sizeof(unsigned));
        geometry_stream.write(reinterpret_cast<const char *>(&number_of_compressed_geometries),
                              sizeof(unsigned));
        geometry_stream.write(reinterpret_cast<char *>(&(m_geometry_node_list[0])),
                              number_of_compressed_geometries * sizeof(NodeID));
        geometry_stream.write(reinterpret_cast<char *>(&(m_geometry_fwd_weight_list[0])),
                              number_of_compressed_geometries * sizeof(EdgeWeight));
        geometry_stream.write(reinterpret_cast<char *>(&(m_geometry_rev_weight_list[0])),
                              number_of_compressed_geometries * sizeof(EdgeWeight));
    };
|
2016-03-15 02:03:19 -04:00
|
|
|
|
2016-05-17 13:50:29 -04:00
|
|
|
    // Serializes the per-segment data-source id list (which CSV, if any,
    // supplied each segment's speed) for later use by the debug vector tiles.
    const auto save_datasource_indexes = [&] {
        std::ofstream datasource_stream(datasource_indexes_filename, std::ios::binary);
        if (!datasource_stream)
        {
            const std::string message{"Failed to open " + datasource_indexes_filename +
                                      " for writing"};
            throw util::exception(message + SOURCE_REF);
        }
        // Layout: 64-bit entry count followed by one byte per segment.
        std::uint64_t number_of_datasource_entries = m_geometry_datasource.size();
        datasource_stream.write(reinterpret_cast<const char *>(&number_of_datasource_entries),
                                sizeof(number_of_datasource_entries));
        if (number_of_datasource_entries > 0)
        {
            datasource_stream.write(reinterpret_cast<char *>(&(m_geometry_datasource[0])),
                                    number_of_datasource_entries * sizeof(uint8_t));
        }
    };
|
2016-03-15 02:03:19 -04:00
|
|
|
|
2016-05-17 13:50:29 -04:00
|
|
|
    // Writes the human-readable data-source names, one per line: the built-in
    // "lua profile" source first, then one entry per speed CSV file.
    const auto save_datastore_names = [&] {
        std::ofstream datasource_stream(datasource_names_filename, std::ios::binary);
        if (!datasource_stream)
        {
            const std::string message{"Failed to open " + datasource_names_filename +
                                      " for writing"};
            throw util::exception(message + SOURCE_REF);
        }
        datasource_stream << "lua profile" << std::endl;
        for (auto const &name : segment_speed_filenames)
        {
            // Only write the filename, without path or extension.
            // This prevents information leakage, and keeps names short
            // for rendering in the debug tiles.
            const boost::filesystem::path p(name);
            datasource_stream << p.stem().string() << std::endl;
        }
    };
|
|
|
|
|
|
|
|
tbb::parallel_invoke(maybe_save_geometries, save_datasource_indexes, save_datastore_names);
|
2015-10-14 18:08:22 -04:00
|
|
|
|
2016-07-26 09:00:58 -04:00
|
|
|
auto penaltyblock = reinterpret_cast<const extractor::lookup::PenaltyBlock *>(
|
|
|
|
edge_penalty_region.get_address());
|
2016-06-24 01:01:37 -04:00
|
|
|
auto edge_segment_byte_ptr = reinterpret_cast<const char *>(edge_segment_region.get_address());
|
|
|
|
auto edge_based_edge_ptr = reinterpret_cast<extractor::EdgeBasedEdge *>(
|
2016-07-26 09:00:58 -04:00
|
|
|
reinterpret_cast<char *>(edge_based_graph_region.get_address()) +
|
|
|
|
sizeof(EdgeBasedGraphHeader));
|
2016-06-24 01:01:37 -04:00
|
|
|
|
|
|
|
const auto edge_based_edge_last = reinterpret_cast<extractor::EdgeBasedEdge *>(
|
2016-07-26 09:00:58 -04:00
|
|
|
reinterpret_cast<char *>(edge_based_graph_region.get_address()) +
|
|
|
|
sizeof(EdgeBasedGraphHeader) +
|
|
|
|
sizeof(extractor::EdgeBasedEdge) * graph_header.number_of_edges);
|
2016-06-24 01:01:37 -04:00
|
|
|
|
|
|
|
while (edge_based_edge_ptr != edge_based_edge_last)
|
2015-10-14 18:08:22 -04:00
|
|
|
{
|
2016-06-24 01:01:37 -04:00
|
|
|
// Make a copy of the data from the memory map
|
|
|
|
extractor::EdgeBasedEdge inbuffer = *edge_based_edge_ptr;
|
|
|
|
edge_based_edge_ptr++;
|
|
|
|
|
2016-04-29 03:48:13 -04:00
|
|
|
if (update_edge_weights || update_turn_penalties)
|
2015-10-14 18:08:22 -04:00
|
|
|
{
|
2016-07-28 17:09:55 -04:00
|
|
|
bool skip_this_edge = false;
|
2016-06-24 01:01:37 -04:00
|
|
|
auto header = reinterpret_cast<const extractor::lookup::SegmentHeaderBlock *>(
|
|
|
|
edge_segment_byte_ptr);
|
|
|
|
edge_segment_byte_ptr += sizeof(extractor::lookup::SegmentHeaderBlock);
|
2015-10-14 18:08:22 -04:00
|
|
|
|
2016-06-24 01:01:37 -04:00
|
|
|
auto previous_osm_node_id = header->previous_osm_node_id;
|
2016-05-12 12:50:10 -04:00
|
|
|
EdgeWeight new_weight = 0;
|
2016-06-24 01:01:37 -04:00
|
|
|
int compressed_edge_nodes = static_cast<int>(header->num_osm_nodes);
|
|
|
|
|
|
|
|
auto segmentblocks =
|
|
|
|
reinterpret_cast<const extractor::lookup::SegmentBlock *>(edge_segment_byte_ptr);
|
|
|
|
edge_segment_byte_ptr +=
|
|
|
|
sizeof(extractor::lookup::SegmentBlock) * (header->num_osm_nodes - 1);
|
2015-10-14 18:08:22 -04:00
|
|
|
|
2016-06-24 01:01:37 -04:00
|
|
|
const auto num_segments = header->num_osm_nodes - 1;
|
|
|
|
for (auto i : util::irange<std::size_t>(0, num_segments))
|
2015-10-14 18:08:22 -04:00
|
|
|
{
|
2016-05-20 13:20:12 -04:00
|
|
|
auto speed_iter =
|
2016-06-24 01:01:37 -04:00
|
|
|
find(segment_speed_lookup,
|
2016-10-18 18:34:06 -04:00
|
|
|
SegmentSpeedSource{
|
|
|
|
previous_osm_node_id, segmentblocks[i].this_osm_node_id, {0, 0}});
|
2015-10-14 18:08:22 -04:00
|
|
|
if (speed_iter != segment_speed_lookup.end())
|
|
|
|
{
|
2016-07-21 12:57:21 -04:00
|
|
|
if (speed_iter->speed_source.speed > 0)
|
|
|
|
{
|
2016-07-22 12:23:54 -04:00
|
|
|
const auto new_segment_weight = distanceAndSpeedToWeight(
|
2016-07-26 09:00:58 -04:00
|
|
|
segmentblocks[i].segment_length, speed_iter->speed_source.speed);
|
2016-07-21 12:57:21 -04:00
|
|
|
new_weight += new_segment_weight;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2016-07-28 17:09:55 -04:00
|
|
|
// If we hit a 0-speed edge, then it's effectively not traversible.
|
|
|
|
// We don't want to include it in the edge_based_edge_list, so
|
|
|
|
// we set a flag and `continue` the parent loop as soon as we can.
|
|
|
|
// This would be a perfect place to use `goto`, but Patrick vetoed it.
|
|
|
|
skip_this_edge = true;
|
2016-07-21 12:57:21 -04:00
|
|
|
break;
|
|
|
|
}
|
2015-10-14 18:08:22 -04:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
// If no lookup found, use the original weight value for this segment
|
2016-06-24 01:01:37 -04:00
|
|
|
new_weight += segmentblocks[i].segment_weight;
|
2015-10-14 18:08:22 -04:00
|
|
|
}
|
|
|
|
|
2016-06-24 01:01:37 -04:00
|
|
|
previous_osm_node_id = segmentblocks[i].this_osm_node_id;
|
2015-10-14 18:08:22 -04:00
|
|
|
}
|
|
|
|
|
2016-07-28 17:09:55 -04:00
|
|
|
// We found a zero-speed edge, so we'll skip this whole edge-based-edge which
|
|
|
|
// effectively removes it from the routing network.
|
2016-07-22 03:08:40 -04:00
|
|
|
if (skip_this_edge)
|
|
|
|
{
|
2016-07-28 17:09:55 -04:00
|
|
|
penaltyblock++;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2016-10-18 18:34:06 -04:00
|
|
|
auto turn_iter =
|
|
|
|
find(turn_penalty_lookup,
|
|
|
|
TurnPenaltySource{
|
|
|
|
penaltyblock->from_id, penaltyblock->via_id, penaltyblock->to_id, {0, 0}});
|
2016-04-29 03:48:13 -04:00
|
|
|
if (turn_iter != turn_penalty_lookup.end())
|
|
|
|
{
|
2016-10-18 18:34:06 -04:00
|
|
|
int new_turn_weight = static_cast<int>(turn_iter->penalty_source.penalty * 10);
|
2016-04-29 03:48:13 -04:00
|
|
|
|
|
|
|
if (new_turn_weight + new_weight < compressed_edge_nodes)
|
|
|
|
{
|
2016-12-06 15:30:46 -05:00
|
|
|
util::Log(logWARNING) << "turn penalty " << turn_iter->penalty_source.penalty
|
|
|
|
<< " for turn " << penaltyblock->from_id << ", "
|
|
|
|
<< penaltyblock->via_id << ", " << penaltyblock->to_id
|
|
|
|
<< " is too negative: clamping turn weight to "
|
|
|
|
<< compressed_edge_nodes;
|
2016-04-29 03:48:13 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
inbuffer.weight = std::max(new_turn_weight + new_weight, compressed_edge_nodes);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2016-06-24 01:01:37 -04:00
|
|
|
inbuffer.weight = penaltyblock->fixed_penalty + new_weight;
|
2016-04-29 03:48:13 -04:00
|
|
|
}
|
2016-06-24 01:01:37 -04:00
|
|
|
|
|
|
|
// Increment the pointer
|
|
|
|
penaltyblock++;
|
2015-10-14 18:08:22 -04:00
|
|
|
}
|
|
|
|
|
2015-10-01 15:47:29 -04:00
|
|
|
edge_based_edge_list.emplace_back(std::move(inbuffer));
|
2015-07-04 11:37:24 -04:00
|
|
|
}
|
2015-10-14 18:08:22 -04:00
|
|
|
|
2016-12-06 15:30:46 -05:00
|
|
|
util::Log() << "Done reading edges";
|
2016-06-24 01:01:37 -04:00
|
|
|
return graph_header.max_edge_id;
|
2015-07-04 11:37:24 -04:00
|
|
|
}
|
|
|
|
|
2016-01-07 13:19:55 -05:00
|
|
|
void Contractor::ReadNodeLevels(std::vector<float> &node_levels) const
|
2015-11-09 15:14:39 -05:00
|
|
|
{
|
2016-11-30 22:08:01 -05:00
|
|
|
storage::io::FileReader order_file(config.level_output_path,
|
|
|
|
storage::io::FileReader::HasNoFingerprint);
|
2015-11-09 15:14:39 -05:00
|
|
|
|
2016-11-30 22:08:01 -05:00
|
|
|
const auto level_size = order_file.ReadElementCount32();
|
2015-11-09 15:14:39 -05:00
|
|
|
node_levels.resize(level_size);
|
2016-11-30 22:08:01 -05:00
|
|
|
order_file.ReadInto(node_levels);
|
2015-11-09 15:14:39 -05:00
|
|
|
}
|
|
|
|
|
2016-01-07 13:19:55 -05:00
|
|
|
void Contractor::WriteNodeLevels(std::vector<float> &&in_node_levels) const
|
2015-11-09 15:14:39 -05:00
|
|
|
{
|
|
|
|
std::vector<float> node_levels(std::move(in_node_levels));
|
|
|
|
|
2015-10-14 18:08:22 -04:00
|
|
|
boost::filesystem::ofstream order_output_stream(config.level_output_path, std::ios::binary);
|
2015-11-09 15:14:39 -05:00
|
|
|
|
|
|
|
unsigned level_size = node_levels.size();
|
|
|
|
order_output_stream.write((char *)&level_size, sizeof(unsigned));
|
2015-10-14 18:08:22 -04:00
|
|
|
order_output_stream.write((char *)node_levels.data(), sizeof(float) * node_levels.size());
|
2015-11-09 15:14:39 -05:00
|
|
|
}
|
2015-10-01 15:47:29 -04:00
|
|
|
|
2016-01-07 13:19:55 -05:00
|
|
|
void Contractor::WriteCoreNodeMarker(std::vector<bool> &&in_is_core_node) const
|
2015-08-08 09:28:05 -04:00
|
|
|
{
|
2015-11-09 15:14:39 -05:00
|
|
|
std::vector<bool> is_core_node(std::move(in_is_core_node));
|
|
|
|
std::vector<char> unpacked_bool_flags(std::move(is_core_node.size()));
|
2015-08-08 09:28:05 -04:00
|
|
|
for (auto i = 0u; i < is_core_node.size(); ++i)
|
|
|
|
{
|
|
|
|
unpacked_bool_flags[i] = is_core_node[i] ? 1 : 0;
|
|
|
|
}
|
|
|
|
|
2015-09-09 12:34:09 -04:00
|
|
|
boost::filesystem::ofstream core_marker_output_stream(config.core_output_path,
|
|
|
|
std::ios::binary);
|
2015-08-09 12:30:04 -04:00
|
|
|
unsigned size = unpacked_bool_flags.size();
|
|
|
|
core_marker_output_stream.write((char *)&size, sizeof(unsigned));
|
2015-09-09 12:34:09 -04:00
|
|
|
core_marker_output_stream.write((char *)unpacked_bool_flags.data(),
|
|
|
|
sizeof(char) * unpacked_bool_flags.size());
|
2015-08-08 09:28:05 -04:00
|
|
|
}
|
|
|
|
|
2016-01-07 19:31:57 -05:00
|
|
|
// Serializes the contracted graph to the .hsgr file in StaticGraph layout:
// fingerprint, CRC32 of the edge list, node count, edge count, the node
// array (first_edge offsets + sentinels), then every edge entry.
// Returns the number of edges actually written.
// NOTE(review): the edge list parameter is declared const& yet is sorted
// in place below — presumably DeallocatingVector's iterators permit this;
// confirm against its declaration.
std::size_t
Contractor::WriteContractedGraph(unsigned max_node_id,
                                 const util::DeallocatingVector<QueryEdge> &contracted_edge_list)
{
    // Sorting contracted edges in a way that the static query graph can read some in in-place.
    tbb::parallel_sort(contracted_edge_list.begin(), contracted_edge_list.end());
    const std::uint64_t contracted_edge_count = contracted_edge_list.size();
    util::Log() << "Serializing compacted graph of " << contracted_edge_count << " edges";

    const util::FingerPrint fingerprint = util::FingerPrint::GetValid();
    boost::filesystem::ofstream hsgr_output_stream(config.graph_output_path, std::ios::binary);
    hsgr_output_stream.write((char *)&fingerprint, sizeof(util::FingerPrint));

    // Contraction may leave trailing node ids unused; find the largest id
    // actually referenced so the node array loop below can stop there.
    const NodeID max_used_node_id = [&contracted_edge_list] {
        NodeID tmp_max = 0;
        for (const QueryEdge &edge : contracted_edge_list)
        {
            BOOST_ASSERT(SPECIAL_NODEID != edge.source);
            BOOST_ASSERT(SPECIAL_NODEID != edge.target);
            tmp_max = std::max(tmp_max, edge.source);
            tmp_max = std::max(tmp_max, edge.target);
        }
        return tmp_max;
    }();

    util::Log(logDEBUG) << "input graph has " << (max_node_id + 1) << " nodes";
    util::Log(logDEBUG) << "contracted graph has " << (max_used_node_id + 1) << " nodes";

    std::vector<util::StaticGraph<EdgeData>::NodeArrayEntry> node_array;
    // make sure we have at least one sentinel
    node_array.resize(max_node_id + 2);

    util::Log() << "Building node array";
    util::StaticGraph<EdgeData>::EdgeIterator edge = 0;
    util::StaticGraph<EdgeData>::EdgeIterator position = 0;
    util::StaticGraph<EdgeData>::EdgeIterator last_edge;

    // initializing 'first_edge'-field of nodes:
    // relies on the edge list being sorted by source (parallel_sort above),
    // so each node's edges form one contiguous run.
    for (const auto node : util::irange(0u, max_used_node_id + 1))
    {
        last_edge = edge;
        while ((edge < contracted_edge_count) && (contracted_edge_list[edge].source == node))
        {
            ++edge;
        }
        node_array[node].first_edge = position; //=edge
        position += edge - last_edge; // remove
    }

    // Fill every remaining (unused + sentinel) slot with the total edge
    // count, so first_edge lookups past the last used node stay in range.
    for (const auto sentinel_counter :
         util::irange<unsigned>(max_used_node_id + 1, node_array.size()))
    {
        // sentinel element, guarded against underflow
        node_array[sentinel_counter].first_edge = contracted_edge_count;
    }

    util::Log() << "Serializing node array";

    // Checksum over the edge list lets readers detect mismatched files.
    RangebasedCRC32 crc32_calculator;
    const unsigned edges_crc32 = crc32_calculator(contracted_edge_list);
    util::Log() << "Writing CRC32: " << edges_crc32;

    const std::uint64_t node_array_size = node_array.size();
    // serialize crc32, aka checksum
    hsgr_output_stream.write((char *)&edges_crc32, sizeof(unsigned));
    // serialize number of nodes
    hsgr_output_stream.write((char *)&node_array_size, sizeof(std::uint64_t));
    // serialize number of edges
    hsgr_output_stream.write((char *)&contracted_edge_count, sizeof(std::uint64_t));
    // serialize all nodes
    if (node_array_size > 0)
    {
        hsgr_output_stream.write((char *)&node_array[0],
                                 sizeof(util::StaticGraph<EdgeData>::NodeArrayEntry) *
                                     node_array_size);
    }

    // serialize all edges
    util::Log() << "Building edge array";
    std::size_t number_of_used_edges = 0;

    util::StaticGraph<EdgeData>::EdgeArrayEntry current_edge;
    for (const auto edge : util::irange<std::size_t>(0UL, contracted_edge_list.size()))
    {
        // some self-loops are required for oneway handling. Need to assertthat we only keep these
        // (TODO)
        // no eigen loops
        // BOOST_ASSERT(contracted_edge_list[edge].source != contracted_edge_list[edge].target ||
        // node_represents_oneway[contracted_edge_list[edge].source]);
        current_edge.target = contracted_edge_list[edge].target;
        current_edge.data = contracted_edge_list[edge].data;

        // every target needs to be valid
        BOOST_ASSERT(current_edge.target <= max_used_node_id);
#ifndef NDEBUG
        // Debug-only sanity check: a non-positive weight would corrupt
        // shortest-path queries, so fail loudly here instead.
        if (current_edge.data.weight <= 0)
        {
            util::Log(logWARNING) << "Edge: " << edge
                                  << ",source: " << contracted_edge_list[edge].source
                                  << ", target: " << contracted_edge_list[edge].target
                                  << ", weight: " << current_edge.data.weight;

            util::Log(logWARNING) << "Failed at adjacency list of node "
                                  << contracted_edge_list[edge].source << "/"
                                  << node_array.size() - 1;
            throw util::exception("Edge weight is <= 0" + SOURCE_REF);
        }
#endif
        hsgr_output_stream.write((char *)&current_edge,
                                 sizeof(util::StaticGraph<EdgeData>::EdgeArrayEntry));

        ++number_of_used_edges;
    }

    return number_of_used_edges;
}
|
|
|
|
|
2015-04-23 11:48:41 -04:00
|
|
|
/**
|
|
|
|
\brief Build contracted graph.
|
|
|
|
*/
|
2016-01-07 13:19:55 -05:00
|
|
|
void Contractor::ContractGraph(
|
2016-06-11 11:23:29 -04:00
|
|
|
const EdgeID max_edge_id,
|
2016-01-07 19:31:57 -05:00
|
|
|
util::DeallocatingVector<extractor::EdgeBasedEdge> &edge_based_edge_list,
|
|
|
|
util::DeallocatingVector<QueryEdge> &contracted_edge_list,
|
2016-01-07 04:33:47 -05:00
|
|
|
std::vector<EdgeWeight> &&node_weights,
|
2016-01-07 19:31:57 -05:00
|
|
|
std::vector<bool> &is_core_node,
|
|
|
|
std::vector<float> &inout_node_levels) const
|
2015-04-23 11:48:41 -04:00
|
|
|
{
|
2015-11-09 15:14:39 -05:00
|
|
|
std::vector<float> node_levels;
|
|
|
|
node_levels.swap(inout_node_levels);
|
|
|
|
|
2016-05-27 15:05:04 -04:00
|
|
|
GraphContractor graph_contractor(
|
|
|
|
max_edge_id + 1, edge_based_edge_list, std::move(node_levels), std::move(node_weights));
|
2016-01-07 13:19:55 -05:00
|
|
|
graph_contractor.Run(config.core_factor);
|
|
|
|
graph_contractor.GetEdges(contracted_edge_list);
|
|
|
|
graph_contractor.GetCoreMarker(is_core_node);
|
|
|
|
graph_contractor.GetNodeLevels(inout_node_levels);
|
2015-04-23 11:48:41 -04:00
|
|
|
}
|
2016-01-05 10:51:13 -05:00
|
|
|
}
|
|
|
|
}
|