Fold parsing and data loading into concurrently executing actions
commit 0a73737f69
parent 2fd1b6c913
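The diff below reworks Contractor::LoadEdgeExpandedGraph so that the CSV parsing and the binary file loading each live in a small guarded lambda, and all of them are handed to a single tbb::parallel_invoke call: independent work runs concurrently, and any task whose update flag is not set simply returns early. A minimal standalone sketch of that pattern, with hypothetical task names and placeholder data instead of the real OSRM parsers:

    #include <tbb/parallel_invoke.h>

    #include <iostream>
    #include <vector>

    int main()
    {
        const bool update_edge_weights = true;
        const bool update_turn_penalties = false;

        std::vector<double> segment_speeds;
        std::vector<double> turn_penalties;

        // Each task carries its own guard and simply returns when its work is not
        // needed, so every task can be passed to parallel_invoke unconditionally.
        const auto maybe_parse_segment_speeds = [&] {
            if (!update_edge_weights)
                return;
            segment_speeds = {13.9, 8.3, 27.8}; // placeholder for parsing CSV files
        };

        const auto maybe_parse_turn_penalties = [&] {
            if (!update_turn_penalties)
                return;
            turn_penalties = {1.5, 2.0}; // placeholder for parsing CSV files
        };

        // parallel_invoke runs the callables concurrently and blocks until all of
        // them have finished; the captured vectors are then safe to read.
        tbb::parallel_invoke(maybe_parse_segment_speeds, maybe_parse_turn_penalties);

        std::cout << segment_speeds.size() << " speeds, " << turn_penalties.size()
                  << " penalties loaded\n";
    }

Putting the guard inside each task keeps the parallel_invoke call itself unconditional, which is exactly what the maybe_* lambdas in the diff do.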
@@ -279,20 +279,18 @@ std::size_t Contractor::LoadEdgeExpandedGraph(
             turn_penalty_lookup = parse_turn_penalty_lookup_from_csv_files(turn_penalty_filenames);
     };
 
-    tbb::parallel_invoke(parse_segment_speeds, parse_turn_penalties);
-
     // If we update the edge weights, this file will hold the datasource information for each
-    // segment
+    // segment; the other files will also be conditionally filled concurrently if we make an update
     std::vector<uint8_t> m_geometry_datasource;
 
-    if (update_edge_weights || update_turn_penalties)
-    {
-        // Here, we have to update the compressed geometry weights
-        // First, we need the external-to-internal node lookup table
-
     std::vector<extractor::QueryNode> internal_to_external_node_map;
+    std::vector<unsigned> m_geometry_indices;
+    std::vector<extractor::CompressedEdgeContainer::CompressedEdge> m_geometry_list;
+
+    const auto maybe_load_internal_to_external_node_map = [&] {
+        if (!(update_edge_weights || update_turn_penalties))
+            return;
 
-        const auto load_internal_to_external_node_map = [&] {
         boost::filesystem::ifstream nodes_input_stream(nodes_filename, std::ios::binary);
 
         if (!nodes_input_stream)
@@ -309,10 +307,10 @@ std::size_t Contractor::LoadEdgeExpandedGraph(
                                 number_of_nodes * sizeof(extractor::QueryNode));
     };
 
-        std::vector<unsigned> m_geometry_indices;
-        std::vector<extractor::CompressedEdgeContainer::CompressedEdge> m_geometry_list;
+    const auto maybe_load_geometries = [&] {
+        if (!(update_edge_weights || update_turn_penalties))
+            return;
 
-        const auto load_geometries = [&] {
         std::ifstream geometry_stream(geometry_filename, std::ios::binary);
         if (!geometry_stream)
         {
@@ -337,14 +335,20 @@ std::size_t Contractor::LoadEdgeExpandedGraph(
 
         if (number_of_compressed_geometries > 0)
         {
-            geometry_stream.read(
-                (char *)&(m_geometry_list[0]),
+            geometry_stream.read((char *)&(m_geometry_list[0]),
                                  number_of_compressed_geometries *
                                      sizeof(extractor::CompressedEdgeContainer::CompressedEdge));
         }
     };
 
-    tbb::parallel_invoke(load_internal_to_external_node_map, load_geometries);
+    // Folds all our actions into independently concurrently executing lambdas
+    tbb::parallel_invoke(parse_segment_speeds, parse_turn_penalties, //
+                         maybe_load_internal_to_external_node_map, maybe_load_geometries);
+
+    if (update_edge_weights || update_turn_penalties)
+    {
+        // Here, we have to update the compressed geometry weights
+        // First, we need the external-to-internal node lookup table
 
         // This is a list of the "data source id" for every segment in the compressed
         // geometry container. We assume that everything so far has come from the
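Both maybe_load_* tasks read the same length-prefixed binary layout: an element count comes first, then count * sizeof(element) bytes are bulk-read into a pre-sized vector. A minimal sketch of that loading step, assuming a hypothetical Record type and file name rather than OSRM's actual on-disk structures:

    #include <cstdint>
    #include <fstream>
    #include <iostream>
    #include <string>
    #include <vector>

    // Hypothetical fixed-size record standing in for CompressedEdgeContainer::CompressedEdge.
    struct Record
    {
        std::uint32_t node_id;
        std::int32_t weight;
    };

    int main()
    {
        const std::string filename = "geometry.bin"; // hypothetical input file

        std::ifstream input(filename, std::ios::binary);
        if (!input)
        {
            std::cerr << "Failed to open " << filename << '\n';
            return 1;
        }

        // The file starts with an element count, followed by that many fixed-size records.
        std::uint32_t count = 0;
        input.read(reinterpret_cast<char *>(&count), sizeof(count));

        std::vector<Record> records(count);
        if (count > 0)
        {
            input.read(reinterpret_cast<char *>(records.data()), count * sizeof(Record));
        }

        std::cout << "read " << records.size() << " records\n";
    }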